127 }
128
// Release an ActiveArray.  The array was presumably allocated as a raw
// char buffer (matching the FREE_C_HEAP_ARRAY below) with the object
// constructed in place, so we must run the destructor explicitly before
// freeing the underlying storage.  Order matters: destruct, then free.
void OopStorage::ActiveArray::destroy(ActiveArray* ba) {
  ba->~ActiveArray();
  FREE_C_HEAP_ARRAY(char, ba);
}
133
// Capacity of the array: the maximum number of blocks it can hold
// (push() fails once _block_count reaches this value).
size_t OopStorage::ActiveArray::size() const {
  return _size;
}
137
// Number of blocks currently in the array, read with a plain load.
// Callers that must observe the entries published by a concurrent
// push() should use block_count_acquire() instead.
size_t OopStorage::ActiveArray::block_count() const {
  return _block_count;
}
141
// Number of blocks currently in the array, read with acquire ordering.
// Pairs with the release_store of _block_count in push(), ensuring the
// block entries up to the returned count are visible to the caller.
size_t OopStorage::ActiveArray::block_count_acquire() const {
  return Atomic::load_acquire(&_block_count);
}
145
// Atomically increment the reference count.
// The assert reports the pre-increment value (new_value - 1), which
// must not have been negative.
void OopStorage::ActiveArray::increment_refcount() const {
  int new_value = Atomic::add(1, &_refcount);
  assert(new_value >= 1, "negative refcount %d", new_value - 1);
}
150
// Atomically decrement the reference count.
// Returns true iff this decrement dropped the count to zero, i.e. the
// caller observed the last reference being released.
bool OopStorage::ActiveArray::decrement_refcount() const {
  int new_value = Atomic::sub(1, &_refcount);
  assert(new_value >= 0, "negative refcount %d", new_value);
  return new_value == 0;
}
156
157 bool OopStorage::ActiveArray::push(Block* block) {
158 size_t index = _block_count;
159 if (index < _size) {
160 block->set_active_index(index);
161 *block_ptr(index) = block;
162 // Use a release_store to ensure all the setup is complete before
163 // making the block visible.
164 Atomic::release_store(&_block_count, index + 1);
165 return true;
166 } else {
167 return false;
993
// Claim the next segment of blocks for this thread to iterate over.
// On success, records the claimed half-open range in
// data->_segment_start / data->_segment_end and returns true.  When no
// blocks remain, delegates to finish_iteration() (which returns false).
// Also folds the size of the previously claimed segment into
// data->_processed before claiming a new one.
// NOTE(review): this is a lock-free claiming protocol whose correctness
// depends on the exact recompute-after-add sequence below; the code is
// left byte-identical.
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
  data->_processed += data->_segment_end - data->_segment_start;
  size_t start = Atomic::load_acquire(&_next_block);
  if (start >= _block_count) {
    return finish_iteration(data); // No more blocks available.
  }
  // Try to claim several at a time, but not *too* many. We want to
  // avoid deciding there are many available and selecting a large
  // quantity, get delayed, and then end up claiming most or all of
  // the remaining largish amount of work, leaving nothing for other
  // threads to do. But too small a step can lead to contention
  // over _next_block, esp. when the work per block is small.
  size_t max_step = 10;
  size_t remaining = _block_count - start;
  size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
  // Atomic::add with possible overshoot. This can perform better
  // than a CAS loop on some platforms when there is contention.
  // We can cope with the uncertainty by recomputing start/end from
  // the result of the add, and dealing with potential overshoot.
  size_t end = Atomic::add(step, &_next_block);
  // _next_block may have changed, so recompute start from result of add.
  start = end - step;
  // _next_block may have changed so much that end has overshot.
  end = MIN2(end, _block_count);
  // _next_block may have changed so much that even start has overshot.
  if (start < _block_count) {
    // Record claimed segment for iteration.
    data->_segment_start = start;
    data->_segment_end = end;
    return true; // Success.
  } else {
    // No more blocks to claim.
    return finish_iteration(data);
  }
}
1029
1030 bool OopStorage::BasicParState::finish_iteration(const IterationData* data) const {
1031 log_info(oopstorage, blocks, stats)
1032 ("Parallel iteration on %s: blocks = " SIZE_FORMAT
1033 ", processed = " SIZE_FORMAT " (%2.f%%)",
|
127 }
128
// Release an ActiveArray.  The array was presumably allocated as a raw
// char buffer (matching the FREE_C_HEAP_ARRAY below) with the object
// constructed in place, so we must run the destructor explicitly before
// freeing the underlying storage.  Order matters: destruct, then free.
void OopStorage::ActiveArray::destroy(ActiveArray* ba) {
  ba->~ActiveArray();
  FREE_C_HEAP_ARRAY(char, ba);
}
133
// Capacity of the array: the maximum number of blocks it can hold
// (push() fails once _block_count reaches this value).
size_t OopStorage::ActiveArray::size() const {
  return _size;
}
137
// Number of blocks currently in the array, read with a plain load.
// Callers that must observe the entries published by a concurrent
// push() should use block_count_acquire() instead.
size_t OopStorage::ActiveArray::block_count() const {
  return _block_count;
}
141
// Number of blocks currently in the array, read with acquire ordering.
// Pairs with the release_store of _block_count in push(), ensuring the
// block entries up to the returned count are visible to the caller.
size_t OopStorage::ActiveArray::block_count_acquire() const {
  return Atomic::load_acquire(&_block_count);
}
145
// Atomically increment the reference count.
// The assert reports the pre-increment value (new_value - 1), which
// must not have been negative.
void OopStorage::ActiveArray::increment_refcount() const {
  int new_value = Atomic::add(&_refcount, 1);
  assert(new_value >= 1, "negative refcount %d", new_value - 1);
}
150
151 bool OopStorage::ActiveArray::decrement_refcount() const {
152 int new_value = Atomic::sub(1, &_refcount);
153 assert(new_value >= 0, "negative refcount %d", new_value);
154 return new_value == 0;
155 }
156
157 bool OopStorage::ActiveArray::push(Block* block) {
158 size_t index = _block_count;
159 if (index < _size) {
160 block->set_active_index(index);
161 *block_ptr(index) = block;
162 // Use a release_store to ensure all the setup is complete before
163 // making the block visible.
164 Atomic::release_store(&_block_count, index + 1);
165 return true;
166 } else {
167 return false;
993
// Claim the next segment of blocks for this thread to iterate over.
// On success, records the claimed half-open range in
// data->_segment_start / data->_segment_end and returns true.  When no
// blocks remain, delegates to finish_iteration() (which returns false).
// Also folds the size of the previously claimed segment into
// data->_processed before claiming a new one.
// NOTE(review): this is a lock-free claiming protocol whose correctness
// depends on the exact recompute-after-add sequence below; the code is
// left byte-identical.
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
  data->_processed += data->_segment_end - data->_segment_start;
  size_t start = Atomic::load_acquire(&_next_block);
  if (start >= _block_count) {
    return finish_iteration(data); // No more blocks available.
  }
  // Try to claim several at a time, but not *too* many. We want to
  // avoid deciding there are many available and selecting a large
  // quantity, get delayed, and then end up claiming most or all of
  // the remaining largish amount of work, leaving nothing for other
  // threads to do. But too small a step can lead to contention
  // over _next_block, esp. when the work per block is small.
  size_t max_step = 10;
  size_t remaining = _block_count - start;
  size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
  // Atomic::add with possible overshoot. This can perform better
  // than a CAS loop on some platforms when there is contention.
  // We can cope with the uncertainty by recomputing start/end from
  // the result of the add, and dealing with potential overshoot.
  size_t end = Atomic::add(&_next_block, step);
  // _next_block may have changed, so recompute start from result of add.
  start = end - step;
  // _next_block may have changed so much that end has overshot.
  end = MIN2(end, _block_count);
  // _next_block may have changed so much that even start has overshot.
  if (start < _block_count) {
    // Record claimed segment for iteration.
    data->_segment_start = start;
    data->_segment_end = end;
    return true; // Success.
  } else {
    // No more blocks to claim.
    return finish_iteration(data);
  }
}
1029
1030 bool OopStorage::BasicParState::finish_iteration(const IterationData* data) const {
1031 log_info(oopstorage, blocks, stats)
1032 ("Parallel iteration on %s: blocks = " SIZE_FORMAT
1033 ", processed = " SIZE_FORMAT " (%2.f%%)",
|