762 ThreadsList *old_list = xchg_java_thread_list(new_list);
763 free_list(old_list);
764 if (ThreadIdTable::is_initialized()) {
765 jlong tid = SharedRuntime::get_java_tid(thread);
766 ThreadIdTable::add_thread(tid, thread);
767 }
768 }
769
770 // set_delete_notify() and clear_delete_notify() are called
771 // under the protection of the delete_lock, but we also use an
772 // Atomic operation to ensure the memory update is seen earlier than
773 // when the delete_lock is dropped.
774 //
// Decrement the delete-notify count. Per the comment block above,
// the caller holds delete_lock; the atomic decrement ensures the
// update is visible before delete_lock is dropped.
void ThreadsSMRSupport::clear_delete_notify() {
  Atomic::dec(&_delete_notify);
}
778
779 bool ThreadsSMRSupport::delete_notify() {
780 // Use load_acquire() in order to see any updates to _delete_notify
781 // earlier than when delete_lock is grabbed.
782 return (OrderAccess::load_acquire(&_delete_notify) != 0);
783 }
784
785 // Safely free a ThreadsList after a Threads::add() or Threads::remove().
786 // The specified ThreadsList may not get deleted during this call if it
787 // is still in-use (referenced by a hazard ptr). Other ThreadsLists
788 // in the chain may get deleted by this call if they are no longer in-use.
789 void ThreadsSMRSupport::free_list(ThreadsList* threads) {
790 assert_locked_or_safepoint(Threads_lock);
791
792 if (is_bootstrap_list(threads)) {
793 // The bootstrap list cannot be freed and is empty so
794 // it does not need to be scanned. Nothing to do here.
795 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::free_list: bootstrap ThreadsList=" INTPTR_FORMAT " is no longer in use.", os::current_thread_id(), p2i(threads));
796 return;
797 }
798
799 threads->set_next_list(_to_delete_list);
800 _to_delete_list = threads;
801 if (EnableThreadSMRStatistics) {
802 _to_delete_list_cnt++;
|
762 ThreadsList *old_list = xchg_java_thread_list(new_list);
763 free_list(old_list);
764 if (ThreadIdTable::is_initialized()) {
765 jlong tid = SharedRuntime::get_java_tid(thread);
766 ThreadIdTable::add_thread(tid, thread);
767 }
768 }
769
770 // set_delete_notify() and clear_delete_notify() are called
771 // under the protection of the delete_lock, but we also use an
772 // Atomic operation to ensure the memory update is seen earlier than
773 // when the delete_lock is dropped.
774 //
// Decrement the delete-notify count. Per the comment block above,
// the caller holds delete_lock; the atomic decrement ensures the
// update is visible before delete_lock is dropped.
void ThreadsSMRSupport::clear_delete_notify() {
  Atomic::dec(&_delete_notify);
}
778
779 bool ThreadsSMRSupport::delete_notify() {
780 // Use load_acquire() in order to see any updates to _delete_notify
781 // earlier than when delete_lock is grabbed.
782 return (Atomic::load_acquire(&_delete_notify) != 0);
783 }
784
785 // Safely free a ThreadsList after a Threads::add() or Threads::remove().
786 // The specified ThreadsList may not get deleted during this call if it
787 // is still in-use (referenced by a hazard ptr). Other ThreadsLists
788 // in the chain may get deleted by this call if they are no longer in-use.
789 void ThreadsSMRSupport::free_list(ThreadsList* threads) {
790 assert_locked_or_safepoint(Threads_lock);
791
792 if (is_bootstrap_list(threads)) {
793 // The bootstrap list cannot be freed and is empty so
794 // it does not need to be scanned. Nothing to do here.
795 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::free_list: bootstrap ThreadsList=" INTPTR_FORMAT " is no longer in use.", os::current_thread_id(), p2i(threads));
796 return;
797 }
798
799 threads->set_next_list(_to_delete_list);
800 _to_delete_list = threads;
801 if (EnableThreadSMRStatistics) {
802 _to_delete_list_cnt++;
|