      // ...
      // If entry not in block, finish block and resume outer loop with entry.
      if (!block->contains(entry)) break;
      // Add entry to releasing bitmap.
      log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
      uintx entry_bitmask = block->bitmask_for_entry(entry);
      assert((releasing & entry_bitmask) == 0,
             "Duplicate entry: " PTR_FORMAT, p2i(entry));
      releasing |= entry_bitmask;
      ++count;
    }
    // Release the contiguous entries that are in block.
    block->release_entries(releasing, this);
    Atomic::sub(&_allocation_count, count);
  }
}
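// A sketch of the batching idea above (illustrative only; the enclosing
// function's header lies before this excerpt, presumably the bulk
// OopStorage::release(const oop* const* ptrs, size_t size)): consecutive
// pointers that fall in the same block are OR'ed into one bitmask, so a
// single atomic release_entries() call retires the whole run instead of
// one update per entry.
//
//   const oop* handles[] = { h0, h1, h2 };  // h0..h2: hypothetical entries
//   storage->release(handles, 3);           // one bitmask update per block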

const size_t initial_active_array_size = 8;

OopStorage::OopStorage(const char* name,
                       Mutex* allocation_mutex,
                       Mutex* active_mutex,
                       NotificationFunction notification_function) :
  _name(os::strdup(name)),
  _active_array(ActiveArray::create(initial_active_array_size)),
  _allocation_list(),
  _deferred_updates(NULL),
  _allocation_mutex(allocation_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _concurrent_iteration_count(0),
  _needs_cleanup(false),
  _notification_function(notification_function)
{
  _active_array->increment_refcount();
  assert(_active_mutex->rank() < _allocation_mutex->rank(),
         "%s: active_mutex must have lower rank than allocation_mutex", _name);
  assert(Service_lock->rank() < _active_mutex->rank(),
         "%s: active_mutex must have higher rank than Service_lock", _name);
  assert(_active_mutex->_safepoint_check_required == Mutex::_safepoint_check_never,
         "%s: active mutex requires never safepoint check", _name);
  assert(_allocation_mutex->_safepoint_check_required == Mutex::_safepoint_check_never,
         "%s: allocation mutex requires never safepoint check", _name);
}
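// Illustrative construction (all names below are hypothetical, not from
// this file): a client that wants to hear about dead entries supplies a
// function matching NotificationFunction; a client that doesn't can
// presumably pass NULL, which notify() below checks for.
//
//   static void note_dead(size_t num_dead) { /* record num_dead somewhere */ }
//   OopStorage* s = new OopStorage("my handles", MyAlloc_lock, MyActive_lock,
//                                  note_dead);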

void OopStorage::delete_empty_block(const Block& block) {
  assert(block.is_empty(), "discarding non-empty block");
  log_debug(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
  Block::delete_block(block);
}

OopStorage::~OopStorage() {
  Block* block;
  // ... (rest of the destructor elided) ...

// ... (the opening of this comment block is elided; it continues
// mid-sentence from a description of the cleanup-request protocol) ...
// request state. A safepoint cleanup task notifies the service thread when
// there may be cleanup work for any storage object, based on the global
// request state. But that notification is deferred if the service thread
// has run recently, and we also avoid duplicate notifications. The service
// thread updates the timestamp and resets the state flags on every iteration.

// Global cleanup request state.
static volatile bool needs_cleanup_requested = false;

// Flag for avoiding duplicate notifications.
static bool needs_cleanup_triggered = false;

// Time after which a notification can be made.
static jlong cleanup_trigger_permit_time = 0;

// Minimum time since last service thread check before notification is
// permitted. The value of 500ms was an arbitrary choice; frequent, but not
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;

void OopStorage::notify(size_t num_dead) const {
  if (_notification_function != NULL) {
    _notification_function(num_dead);
  }
}

bool OopStorage::can_notify() const {
  return _notification_function != NULL;
}
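// Reading of the pair above: notify() forwards a count of dead entries to
// the client-supplied function, and can_notify() lets callers skip the
// dead-entry bookkeeping entirely when no function was registered. (The
// call sites are outside this excerpt, so this is an interpretation rather
// than shown behavior.)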

void OopStorage::trigger_cleanup_if_needed() {
  MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
  if (Atomic::load(&needs_cleanup_requested) &&
      !needs_cleanup_triggered &&
      (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
    needs_cleanup_triggered = true;
    ml.notify_all();
  }
}
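// The three conditions above implement the deferral described in the
// earlier comment: a cleanup request must be pending, it must not already
// have been forwarded, and the defer period since the service thread's
// last pass must have expired. All three are checked under Service_lock,
// which also guards the notify_all() hand-off to the service thread.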

bool OopStorage::has_cleanup_work_and_reset() {
  assert_lock_strong(Service_lock);
  cleanup_trigger_permit_time =
    os::javaTimeNanos() + cleanup_trigger_defer_period;
  needs_cleanup_triggered = false;
  // Set the request flag false and return its old value.
  // Needs to be atomic to avoid dropping a concurrent request.
  // Can't use Atomic::xchg, which may not support bool.
  return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
}
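// The cmpxchg above is the atomic equivalent of:
//
//   bool requested = needs_cleanup_requested;
//   if (requested) needs_cleanup_requested = false;
//   return requested;
//
// done as a single compare-and-exchange so a request arriving between the
// read and the reset cannot be dropped.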
// ... (elided; the lines below close an elided size-accounting function) ...
  total_size += blocks.block_count() * Block::allocation_size();
  total_size += blocks.size() * sizeof(Block*);
  return total_size;
}

// Parallel iteration support

uint OopStorage::BasicParState::default_estimated_thread_count(bool concurrent) {
  uint configured = concurrent ? ConcGCThreads : ParallelGCThreads;
  return MAX2(1u, configured); // Never estimate zero threads.
}
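// MAX2 guards against configurations where the relevant GC thread count is
// zero; the BasicParState constructor below asserts a positive estimate,
// so returning at least 1 maintains that invariant.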

OopStorage::BasicParState::BasicParState(const OopStorage* storage,
                                         uint estimated_thread_count,
                                         bool concurrent) :
  _storage(storage),
  _active_array(_storage->obtain_active_array()),
  _block_count(0),              // initialized properly below
  _next_block(0),
  _estimated_thread_count(estimated_thread_count),
  _concurrent(concurrent),
  _num_dead(0)
{
  assert(estimated_thread_count > 0, "estimated thread count must be positive");
  update_concurrent_iteration_count(1);
  // Get the block count *after* iteration state updated, so concurrent
  // empty block deletion is suppressed and can't reduce the count. But
  // ensure the count we use was written after the block with that count
  // was fully initialized; see ActiveArray::push.
  _block_count = _active_array->block_count_acquire();
}
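// _num_dead starts at zero for each parallel iteration; presumably it
// accumulates the dead entries encountered by the worker threads, to be
// reported through notify() when iteration finishes (its uses lie outside
// this excerpt, so this is an assumption).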

OopStorage::BasicParState::~BasicParState() {
  _storage->relinquish_block_array(_active_array);
  update_concurrent_iteration_count(-1);
  if (_concurrent) {
    // We may have deferred some cleanup work.
    const_cast<OopStorage*>(_storage)->record_needs_cleanup();
  }
}
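// The destructor mirrors the constructor: release the reference to the
// active array (re-enabling empty-block deletion), drop the iteration
// count, and, for concurrent iteration, request the cleanup pass that was
// suppressed while the iteration was in progress.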

void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
  // ... (remainder elided) ...