  size_t size_in_bytes = blocks_offset() + sizeof(Block*) * size;
  void* mem = NEW_C_HEAP_ARRAY3(char, size_in_bytes, mtGC, CURRENT_PC, alloc_fail);
  if (mem == NULL) return NULL;
  return new (mem) ActiveArray(size);
}

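// Destroy an ActiveArray created by create(). The array was constructed
// by placement new into a raw char allocation, so we run the destructor
// explicitly and then free the underlying char array.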
void OopStorage::ActiveArray::destroy(ActiveArray* ba) {
  ba->~ActiveArray();
  FREE_C_HEAP_ARRAY(char, ba);
}

size_t OopStorage::ActiveArray::size() const {
  return _size;
}

size_t OopStorage::ActiveArray::block_count() const {
  return _block_count;
}

size_t OopStorage::ActiveArray::block_count_acquire() const {
  return OrderAccess::load_acquire(&_block_count);
}

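// Reference count protocol: _refcount accounts for the reference from
// _active_array plus one for each reader currently holding the array via
// obtain_active_array(). The array is destroyed when the count drops to
// zero (see relinquish_block_array() below).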
void OopStorage::ActiveArray::increment_refcount() const {
  int new_value = Atomic::add(1, &_refcount);
  assert(new_value >= 1, "negative refcount %d", new_value - 1);
}

bool OopStorage::ActiveArray::decrement_refcount() const {
  int new_value = Atomic::sub(1, &_refcount);
  assert(new_value >= 0, "negative refcount %d", new_value);
  return new_value == 0;
}

bool OopStorage::ActiveArray::push(Block* block) {
  size_t index = _block_count;
  if (index < _size) {
    block->set_active_index(index);
    *block_ptr(index) = block;
    // Use a release_store to ensure all the setup is complete before
    // making the block visible.
    OrderAccess::release_store(&_block_count, index + 1);
    return true;
  } else {
    return false;
  }
}
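
// The release_store in push() pairs with block_count_acquire(): a reader
// that loads the count with acquire semantics is guaranteed to observe a
// fully initialized block pointer at every index below that count. A
// minimal reader sketch (illustrative only, not actual iteration code):
//
//   size_t count = array->block_count_acquire();
//   for (size_t i = 0; i < count; ++i) {
//     Block* b = *array->block_ptr(i); // setup made visible by release/acquire
//     ...
//   }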
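// Remove a block by swap-with-last: the final entry is moved into the
// vacated slot and the count is decremented. Writers are serialized by the
// caller (in this file removal happens with _allocation_mutex held), so a
// plain store of the new _block_count suffices here.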
void OopStorage::ActiveArray::remove(Block* block) {
  assert(_block_count > 0, "array is empty");
  size_t index = block->active_index();
  assert(*block_ptr(index) == block, "block not present");
  size_t last_index = _block_count - 1;
  Block* last_block = *block_ptr(last_index);
  last_block->set_active_index(index);
  *block_ptr(index) = last_block;
  _block_count = last_index;
}

void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
  assert(_block_count == 0, "array must be empty");
  size_t count = from->_block_count;

// ...
bool OopStorage::Block::is_full() const {
  return is_full_bitmask(allocated_bitmask());
}

bool OopStorage::Block::is_empty() const {
  return is_empty_bitmask(allocated_bitmask());
}

uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
  return bitmask_for_index(get_index(ptr));
}

// An empty block is not yet deletable if either:
// (1) There is a release() operation currently operating on it.
// (2) It is in the deferred updates list.
// For interaction with release(), these must follow the empty check,
// and the order of these checks is important.
bool OopStorage::Block::is_safe_to_delete() const {
  assert(is_empty(), "precondition");
  OrderAccess::loadload();
  return (OrderAccess::load_acquire(&_release_refcount) == 0) &&
         (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
}
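
// The _release_refcount check pairs with release_entries(), which holds
// the count across its work: incremented on entry and decremented when
// finished (see Atomic::dec(&_release_refcount) below). A nonzero count
// means some release() may still be touching this block.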

OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
  return _deferred_updates_next;
}

void OopStorage::Block::set_deferred_updates_next(Block* block) {
  _deferred_updates_next = block;
}

bool OopStorage::Block::contains(const oop* ptr) const {
  const oop* base = get_pointer(0);
  return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
}

size_t OopStorage::Block::active_index() const {
  return _active_index;
}

void OopStorage::Block::set_active_index(size_t index) {
  // ...
  size_t new_size = 2 * old_array->size();
  log_debug(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
                                name(), new_size);
  ActiveArray* new_array = ActiveArray::create(new_size, AllocFailStrategy::RETURN_NULL);
  if (new_array == NULL) return false;
  new_array->copy_from(old_array);
  replace_active_array(new_array);
  relinquish_block_array(old_array);
  return true;
}

// Make new_array the _active_array. Increments new_array's refcount
// to account for the new reference. The assignment is atomic wrto
// obtain_active_array; once this function returns, it is safe for the
// caller to relinquish the old array.
void OopStorage::replace_active_array(ActiveArray* new_array) {
  // Caller has the old array that is the current value of _active_array.
  // Update new_array refcount to account for the new reference.
  new_array->increment_refcount();
  // Install new_array, ensuring its initialization is complete first.
  OrderAccess::release_store(&_active_array, new_array);
  // Wait for any readers that could read the old array from _active_array.
  // Can't use GlobalCounter here, because this is called from allocate(),
  // which may be called in the scope of a GlobalCounter critical section
  // when inserting a StringTable entry.
  _protect_active.synchronize();
  // All obtain critical sections that could see the old array have
  // completed, having incremented the refcount of the old array. The
  // caller can now safely relinquish the old array.
}

// Atomically (wrto replace_active_array) get the active array and
// increment its refcount. This provides safe access to the array,
// even if an allocate operation expands and replaces the value of
// _active_array. The caller must relinquish the array when done
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
  SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
  ActiveArray* result = OrderAccess::load_acquire(&_active_array);
  result->increment_refcount();
  return result;
}

// Decrement refcount of array and destroy if refcount is zero.
void OopStorage::relinquish_block_array(ActiveArray* array) const {
  if (array->decrement_refcount()) {
    assert(array != _active_array, "invariant");
    ActiveArray::destroy(array);
  }
}

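// RAII wrapper for the obtain/relinquish protocol above: the constructor
// obtains the active array (incrementing its refcount) and the destructor
// relinquishes it, possibly destroying the array if this was the last
// reference, so scoped users cannot leak a reference.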
class OopStorage::WithActiveArray : public StackObj {
  const OopStorage* _storage;
  ActiveArray* _active_array;

public:
  WithActiveArray(const OopStorage* storage) :
    _storage(storage),
    _active_array(storage->obtain_active_array())
// ...

      // will reduce deferrals before allocating new blocks, so may process
      // some. And the service thread will drain the entire deferred list
      // if there are any pending to-empty transitions.
      if (releasing == old_allocated) {
        owner->record_needs_cleanup();
      }
      log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
                                    owner->name(), p2i(this));
    }
  }
  // Release hold on empty block deletion.
  Atomic::dec(&_release_refcount);
}

// Process one available deferred update. Returns true if one was processed.
bool OopStorage::reduce_deferred_updates() {
  assert_lock_strong(_allocation_mutex);
  // Atomically pop a block off the list, if any available.
  // No ABA issue because this is only called by one thread at a time.
  // The atomicity is wrto pushes by release().
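  // Note: the deferred updates list is terminated by a self-loop rather
  // than NULL, so that a NULL _deferred_updates_next unambiguously means
  // "not on the list"; is_safe_to_delete() relies on that distinction.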
  Block* block = OrderAccess::load_acquire(&_deferred_updates);
  while (true) {
    if (block == NULL) return false;
    // Try atomic pop of block from list.
    Block* tail = block->deferred_updates_next();
    if (block == tail) tail = NULL; // Handle self-loop end marker.
    Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
    if (fetched == block) break; // Update successful.
    block = fetched; // Retry with updated block.
  }
  block->set_deferred_updates_next(NULL); // Clear tail after updating head.
  // Ensure bitmask read after pop is complete, including clearing tail, for
  // ordering with release(). Without this, we may be processing a stale
  // bitmask state here while blocking a release() operation from recording
  // the deferred update needed for its bitmask change.
  OrderAccess::fence();
  // Process popped block.
  uintx allocated = block->allocated_bitmask();

  // Make membership in list consistent with bitmask state.
  if ((_allocation_list.ctail() != NULL) &&
// ...

    ml.notify_all();
  }
}

bool OopStorage::has_cleanup_work_and_reset() {
  assert_lock_strong(Service_lock);
  cleanup_trigger_permit_time =
    os::javaTimeNanos() + cleanup_trigger_defer_period;
  needs_cleanup_triggered = false;
  // Set the request flag false and return its old value.
  // Needs to be atomic to avoid dropping a concurrent request.
  // Can't use Atomic::xchg, which may not support bool.
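  // cmpxchg(false, &needs_cleanup_requested, true) stores false only if
  // the flag was true, and returns the old value either way: true exactly
  // when a request was pending. That makes it an atomic test-and-clear.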
  return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
}

// Record that cleanup is needed, without notifying the Service thread.
// Used by release(), where we can't lock even Service_lock.
void OopStorage::record_needs_cleanup() {
  // Set local flag first, else service thread could wake up and miss
  // the request. This order may instead (rarely) unnecessarily notify.
  OrderAccess::release_store(&_needs_cleanup, true);
  OrderAccess::release_store_fence(&needs_cleanup_requested, true);
}

bool OopStorage::delete_empty_blocks() {
  // Service thread might have oopstorage work, but not for this object.
  // Check for deferred updates even though that's not a service thread
  // trigger; since we're here, we might as well process them.
  if (!OrderAccess::load_acquire(&_needs_cleanup) &&
      (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
    return false;
  }

  MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

  // Clear the request before processing.
  OrderAccess::release_store_fence(&_needs_cleanup, false);

  // Other threads could be adding to the empty block count or the
  // deferred update list while we're working. Set an upper bound on
  // how many updates we'll process and blocks we'll try to release,
  // so other threads can't cause an unbounded stay in this function.
  // We add a bit of slop because the reduce_deferred_updates clause
  // can cause blocks to be double counted. If there are few blocks
  // and many of them are deferred and empty, we might hit the limit
  // and spin the caller without doing very much work. Otherwise,
  // we don't normally hit the limit anyway, instead running out of
  // work to do.
  size_t limit = block_count() + 10;

  for (size_t i = 0; i < limit; ++i) {
    // Process deferred updates, which might make empty blocks available.
    // Continue checking once deletion starts, since additional updates
    // might become available while we're working.
    if (reduce_deferred_updates()) {
      // Be safepoint-polite while looping.
      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// ...

OopStorage::BasicParState::~BasicParState() {
  _storage->relinquish_block_array(_active_array);
  update_concurrent_iteration_count(-1);
  if (_concurrent) {
    // We may have deferred some cleanup work.
    const_cast<OopStorage*>(_storage)->record_needs_cleanup();
  }
}

void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
  if (_concurrent) {
    MutexLocker ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
    _storage->_concurrent_iteration_count += value;
    assert(_storage->_concurrent_iteration_count >= 0, "invariant");
  }
}

bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
  data->_processed += data->_segment_end - data->_segment_start;
  size_t start = OrderAccess::load_acquire(&_next_block);
  if (start >= _block_count) {
    return finish_iteration(data); // No more blocks available.
  }
  // Try to claim several at a time, but not *too* many. We want to
  // avoid deciding there are many available and selecting a large
  // quantity, get delayed, and then end up claiming most or all of
  // the remaining largish amount of work, leaving nothing for other
  // threads to do. But too small a step can lead to contention
  // over _next_block, esp. when the work per block is small.
  size_t max_step = 10;
  size_t remaining = _block_count - start;
  size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
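  // For example (illustrative numbers): with 1000 blocks remaining and 4
  // threads, 1 + 1000/4 = 251 is clamped to max_step = 10; with only 8
  // blocks remaining and 4 threads, the step is 1 + 8/4 = 3.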
  // Atomic::add with possible overshoot. This can perform better
  // than a CAS loop on some platforms when there is contention.
  // We can cope with the uncertainty by recomputing start/end from
  // the result of the add, and dealing with potential overshoot.
  size_t end = Atomic::add(step, &_next_block);
  // _next_block may have changed, so recompute start from result of add.
  start = end - step;
  // _next_block may have changed so much that end has overshot.