709 }
710
711 const char* dup_name(const char* name) {
712 char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
713 strcpy(dup, name);
714 return dup;
715 }
716
// Initial capacity passed to ActiveArray::create for a new storage's
// block array (see the OopStorage constructor below).
const size_t initial_active_array_size = 8;
718
// Construct a named storage.  The two mutexes are supplied by the caller
// and retained (not owned); _name is a C-heap copy freed in the destructor.
OopStorage::OopStorage(const char* name,
                       Mutex* allocation_mutex,
                       Mutex* active_mutex) :
  _name(dup_name(name)),
  _active_array(ActiveArray::create(initial_active_array_size)),
  _allocation_list(),
  _deferred_updates(NULL),
  _allocation_mutex(allocation_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _concurrent_iteration_active(false)
{
  // The storage itself holds one reference to its active array for its
  // whole lifetime; dropped (and checked) in the destructor.
  _active_array->increment_refcount();
  // Lock ordering: _active_mutex is acquired while _allocation_mutex is
  // held (see delete_empty_blocks_concurrent), so it must rank lower.
  assert(_active_mutex->rank() < _allocation_mutex->rank(),
         "%s: active_mutex must have lower rank than allocation_mutex", _name);
  // Both mutexes are always taken with _no_safepoint_check_flag in this
  // file, so neither may demand a safepoint check.
  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: active mutex requires safepoint check", _name);
  assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: allocation mutex requires safepoint check", _name);
}
739
// Log and free a block that contains no live entries.  Callers (see the
// delete_empty_blocks_* functions) have already removed the block from
// _active_array and _allocation_list.
void OopStorage::delete_empty_block(const Block& block) {
  assert(block.is_empty(), "discarding non-empty block");
  log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
  Block::delete_block(block);
}
745
746 OopStorage::~OopStorage() {
747 Block* block;
748 while ((block = _deferred_updates) != NULL) {
|
709 }
710
711 const char* dup_name(const char* name) {
712 char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
713 strcpy(dup, name);
714 return dup;
715 }
716
// Initial capacity passed to ActiveArray::create for a new storage's
// block array (see the OopStorage constructor below).
const size_t initial_active_array_size = 8;
718
// Construct a named storage.  The two mutexes are supplied by the caller
// and retained (not owned); _name is a C-heap copy freed in the destructor.
OopStorage::OopStorage(const char* name,
                       Mutex* allocation_mutex,
                       Mutex* active_mutex) :
  _name(dup_name(name)),
  _active_array(ActiveArray::create(initial_active_array_size)),
  _allocation_list(),
  _deferred_updates(NULL),
  _allocation_mutex(allocation_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _concurrent_iteration_count(0)
{
  // The storage itself holds one reference to its active array for its
  // whole lifetime; dropped (and checked) in the destructor.
  _active_array->increment_refcount();
  // Lock ordering: _active_mutex is acquired while _allocation_mutex is
  // held (see delete_empty_blocks_concurrent), so it must rank lower.
  assert(_active_mutex->rank() < _allocation_mutex->rank(),
         "%s: active_mutex must have lower rank than allocation_mutex", _name);
  // Both mutexes are always taken with _no_safepoint_check_flag in this
  // file, so neither may demand a safepoint check.
  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: active mutex requires safepoint check", _name);
  assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: allocation mutex requires safepoint check", _name);
}
739
// Log and free a block that contains no live entries.  Callers (see the
// delete_empty_blocks_* functions) have already removed the block from
// _active_array and _allocation_list.
void OopStorage::delete_empty_block(const Block& block) {
  assert(block.is_empty(), "discarding non-empty block");
  log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
  Block::delete_block(block);
}
745
746 OopStorage::~OopStorage() {
747 Block* block;
748 while ((block = _deferred_updates) != NULL) {
|
751 }
752 while ((block = _allocation_list.head()) != NULL) {
753 _allocation_list.unlink(*block);
754 }
755 bool unreferenced = _active_array->decrement_refcount();
756 assert(unreferenced, "deleting storage while _active_array is referenced");
757 for (size_t i = _active_array->block_count(); 0 < i; ) {
758 block = _active_array->at(--i);
759 Block::delete_block(*block);
760 }
761 ActiveArray::destroy(_active_array);
762 FREE_C_HEAP_ARRAY(char, _name);
763 }
764
765 void OopStorage::delete_empty_blocks_safepoint() {
766 assert_at_safepoint();
767 // Process any pending release updates, which may make more empty
768 // blocks available for deletion.
769 while (reduce_deferred_updates()) {}
770 // Don't interfere with a concurrent iteration.
771 if (_concurrent_iteration_active) return;
772 // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
773 for (Block* block = _allocation_list.tail();
774 (block != NULL) && block->is_deletable();
775 block = _allocation_list.tail()) {
776 _active_array->remove(block);
777 _allocation_list.unlink(*block);
778 delete_empty_block(*block);
779 }
780 }
781
// Incrementally delete deletable empty blocks while mutators may be
// running.  Holds _allocation_mutex, briefly takes _active_mutex per
// block, and drops _allocation_mutex around each actual deletion.
void OopStorage::delete_empty_blocks_concurrent() {
  MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
  // Other threads could be adding to the empty block count while we
  // release the mutex across the block deletions. Set an upper bound
  // on how many blocks we'll try to release, so other threads can't
  // cause an unbounded stay in this function.
  size_t limit = block_count();

  for (size_t i = 0; i < limit; ++i) {
    // Additional updates might become available while we dropped the
    // lock. But limit number processed to limit lock duration.
    reduce_deferred_updates();

    Block* block = _allocation_list.tail();
    if ((block == NULL) || !block->is_deletable()) {
      // No block to delete, so done. There could be more pending
      // deferred updates that could give us more work to do; deal with
      // that in some later call, to limit lock duration here.
      return;
    }

    {
      // _active_mutex ranks below _allocation_mutex (asserted in the
      // constructor), so taking it while holding the latter is safe.
      MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
      // Don't interfere with a concurrent iteration.
      if (_concurrent_iteration_active) return;
      _active_array->remove(block);
    }
    // Remove block from _allocation_list and delete it.
    _allocation_list.unlink(*block);
    // Release mutex while deleting block; re-acquired when ul is
    // destroyed at the end of this loop iteration.
    MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
    delete_empty_block(*block);
  }
}
816
817 OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
818 const Block* block = find_block_or_null(ptr);
819 if (block != NULL) {
820 // Prevent block deletion and _active_array modification.
821 MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
822 // Block could be a false positive, so get index carefully.
823 size_t index = Block::active_index_safe(block);
824 if ((index < _active_array->block_count()) &&
825 (block == _active_array->at(index)) &&
|
751 }
752 while ((block = _allocation_list.head()) != NULL) {
753 _allocation_list.unlink(*block);
754 }
755 bool unreferenced = _active_array->decrement_refcount();
756 assert(unreferenced, "deleting storage while _active_array is referenced");
757 for (size_t i = _active_array->block_count(); 0 < i; ) {
758 block = _active_array->at(--i);
759 Block::delete_block(*block);
760 }
761 ActiveArray::destroy(_active_array);
762 FREE_C_HEAP_ARRAY(char, _name);
763 }
764
765 void OopStorage::delete_empty_blocks_safepoint() {
766 assert_at_safepoint();
767 // Process any pending release updates, which may make more empty
768 // blocks available for deletion.
769 while (reduce_deferred_updates()) {}
770 // Don't interfere with a concurrent iteration.
771 if (_concurrent_iteration_count > 0) return;
772 // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
773 for (Block* block = _allocation_list.tail();
774 (block != NULL) && block->is_deletable();
775 block = _allocation_list.tail()) {
776 _active_array->remove(block);
777 _allocation_list.unlink(*block);
778 delete_empty_block(*block);
779 }
780 }
781
// Incrementally delete deletable empty blocks while mutators may be
// running.  Holds _allocation_mutex, briefly takes _active_mutex per
// block, and drops _allocation_mutex around each actual deletion.
void OopStorage::delete_empty_blocks_concurrent() {
  MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
  // Other threads could be adding to the empty block count while we
  // release the mutex across the block deletions. Set an upper bound
  // on how many blocks we'll try to release, so other threads can't
  // cause an unbounded stay in this function.
  size_t limit = block_count();

  for (size_t i = 0; i < limit; ++i) {
    // Additional updates might become available while we dropped the
    // lock. But limit number processed to limit lock duration.
    reduce_deferred_updates();

    Block* block = _allocation_list.tail();
    if ((block == NULL) || !block->is_deletable()) {
      // No block to delete, so done. There could be more pending
      // deferred updates that could give us more work to do; deal with
      // that in some later call, to limit lock duration here.
      return;
    }

    {
      // _active_mutex ranks below _allocation_mutex (asserted in the
      // constructor), so taking it while holding the latter is safe.
      MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
      // Don't interfere with a concurrent iteration.
      if (_concurrent_iteration_count > 0) return;
      _active_array->remove(block);
    }
    // Remove block from _allocation_list and delete it.
    _allocation_list.unlink(*block);
    // Release mutex while deleting block; re-acquired when ul is
    // destroyed at the end of this loop iteration.
    MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
    delete_empty_block(*block);
  }
}
816
817 OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
818 const Block* block = find_block_or_null(ptr);
819 if (block != NULL) {
820 // Prevent block deletion and _active_array modification.
821 MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
822 // Block could be a false positive, so get index carefully.
823 size_t index = Block::active_index_safe(block);
824 if ((index < _active_array->block_count()) &&
825 (block == _active_array->at(index)) &&
|
857 }
858
859 // Parallel iteration support
860
861 uint OopStorage::BasicParState::default_estimated_thread_count(bool concurrent) {
862 uint configured = concurrent ? ConcGCThreads : ParallelGCThreads;
863 return MAX2(1u, configured); // Never estimate zero threads.
864 }
865
// Begin a (possibly parallel) iteration over storage's blocks.  Obtains a
// reference to the storage's active array so it can't be released during
// the iteration; relinquished in the destructor.
OopStorage::BasicParState::BasicParState(const OopStorage* storage,
                                         uint estimated_thread_count,
                                         bool concurrent) :
  _storage(storage),
  _active_array(_storage->obtain_active_array()),
  _block_count(0), // initialized properly below
  _next_block(0),
  _estimated_thread_count(estimated_thread_count),
  _concurrent(concurrent)
{
  assert(estimated_thread_count > 0, "estimated thread count must be positive");
  // Mark a concurrent iteration active before sampling the block count.
  update_iteration_state(true);
  // Get the block count *after* iteration state updated, so concurrent
  // empty block deletion is suppressed and can't reduce the count. But
  // ensure the count we use was written after the block with that count
  // was fully initialized; see ActiveArray::push.
  _block_count = _active_array->block_count_acquire();
}
884
// End the iteration: return this state's reference to the block array,
// then clear the storage's concurrent-iteration flag.
OopStorage::BasicParState::~BasicParState() {
  _storage->relinquish_block_array(_active_array);
  update_iteration_state(false);
}
889
890 void OopStorage::BasicParState::update_iteration_state(bool value) {
891 if (_concurrent) {
892 MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
893 assert(_storage->_concurrent_iteration_active != value, "precondition");
894 _storage->_concurrent_iteration_active = value;
895 }
896 }
897
898 bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
899 data->_processed += data->_segment_end - data->_segment_start;
900 size_t start = OrderAccess::load_acquire(&_next_block);
901 if (start >= _block_count) {
902 return finish_iteration(data); // No more blocks available.
903 }
904 // Try to claim several at a time, but not *too* many. We want to
905 // avoid deciding there are many available and selecting a large
906 // quantity, get delayed, and then end up claiming most or all of
907 // the remaining largish amount of work, leaving nothing for other
908 // threads to do. But too small a step can lead to contention
909 // over _next_block, esp. when the work per block is small.
910 size_t max_step = 10;
911 size_t remaining = _block_count - start;
912 size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
913 // Atomic::add with possible overshoot. This can perform better
|
857 }
858
859 // Parallel iteration support
860
861 uint OopStorage::BasicParState::default_estimated_thread_count(bool concurrent) {
862 uint configured = concurrent ? ConcGCThreads : ParallelGCThreads;
863 return MAX2(1u, configured); // Never estimate zero threads.
864 }
865
// Begin a (possibly parallel) iteration over storage's blocks.  Obtains a
// reference to the storage's active array so it can't be released during
// the iteration; relinquished in the destructor.
OopStorage::BasicParState::BasicParState(const OopStorage* storage,
                                         uint estimated_thread_count,
                                         bool concurrent) :
  _storage(storage),
  _active_array(_storage->obtain_active_array()),
  _block_count(0), // initialized properly below
  _next_block(0),
  _estimated_thread_count(estimated_thread_count),
  _concurrent(concurrent)
{
  assert(estimated_thread_count > 0, "estimated thread count must be positive");
  // Count this concurrent iteration before sampling the block count.
  update_concurrent_iteration_count(1);
  // Get the block count *after* iteration state updated, so concurrent
  // empty block deletion is suppressed and can't reduce the count. But
  // ensure the count we use was written after the block with that count
  // was fully initialized; see ActiveArray::push.
  _block_count = _active_array->block_count_acquire();
}
884
// End the iteration: return this state's reference to the block array,
// then decrement the storage's concurrent-iteration count.
OopStorage::BasicParState::~BasicParState() {
  _storage->relinquish_block_array(_active_array);
  update_concurrent_iteration_count(-1);
}
889
890 void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
891 if (_concurrent) {
892 MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
893 _storage->_concurrent_iteration_count += value;
894 assert(_storage->_concurrent_iteration_count >= 0, "invariant");
895 }
896 }
897
898 bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
899 data->_processed += data->_segment_end - data->_segment_start;
900 size_t start = OrderAccess::load_acquire(&_next_block);
901 if (start >= _block_count) {
902 return finish_iteration(data); // No more blocks available.
903 }
904 // Try to claim several at a time, but not *too* many. We want to
905 // avoid deciding there are many available and selecting a large
906 // quantity, get delayed, and then end up claiming most or all of
907 // the remaining largish amount of work, leaving nothing for other
908 // threads to do. But too small a step can lead to contention
909 // over _next_block, esp. when the work per block is small.
910 size_t max_step = 10;
911 size_t remaining = _block_count - start;
912 size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
913 // Atomic::add with possible overshoot. This can perform better
|
936 ("Parallel iteration on %s: blocks = " SIZE_FORMAT
937 ", processed = " SIZE_FORMAT " (%2.f%%)",
938 _storage->name(), _block_count, data->_processed,
939 percent_of(data->_processed, _block_count));
940 return false;
941 }
942
943 const char* OopStorage::name() const { return _name; }
944
945 #ifndef PRODUCT
946
947 void OopStorage::print_on(outputStream* st) const {
948 size_t allocations = _allocation_count;
949 size_t blocks = _active_array->block_count();
950
951 double data_size = section_size * section_count;
952 double alloc_percentage = percent_of((double)allocations, blocks * data_size);
953
954 st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
955 name(), allocations, blocks, alloc_percentage, total_memory_usage());
956 if (_concurrent_iteration_active) {
957 st->print(", concurrent iteration active");
958 }
959 }
960
961 #endif // !PRODUCT
|
936 ("Parallel iteration on %s: blocks = " SIZE_FORMAT
937 ", processed = " SIZE_FORMAT " (%2.f%%)",
938 _storage->name(), _block_count, data->_processed,
939 percent_of(data->_processed, _block_count));
940 return false;
941 }
942
943 const char* OopStorage::name() const { return _name; }
944
945 #ifndef PRODUCT
946
947 void OopStorage::print_on(outputStream* st) const {
948 size_t allocations = _allocation_count;
949 size_t blocks = _active_array->block_count();
950
951 double data_size = section_size * section_count;
952 double alloc_percentage = percent_of((double)allocations, blocks * data_size);
953
954 st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
955 name(), allocations, blocks, alloc_percentage, total_memory_usage());
956 if (_concurrent_iteration_count > 0) {
957 st->print(", concurrent iteration active");
958 }
959 }
960
961 #endif // !PRODUCT
|