14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/logStream.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "runtime/thread.inline.hpp"
29 #include "runtime/threadSMR.inline.hpp"
30 #include "services/threadService.hpp"
31 #include "utilities/globalDefinitions.hpp"
32 #include "utilities/resourceHash.hpp"
33
34 Monitor* ThreadsSMRSupport::_smr_delete_lock =
35 new Monitor(Monitor::special, "smr_delete_lock",
36 false /* allow_vm_block */,
37 Monitor::_safepoint_check_never);
38 // The '_cnt', '_max' and '_times' fields are enabled via
39 // -XX:+EnableThreadSMRStatistics:
40
41 // # of parallel threads in _smr_delete_lock->wait().
42 // Impl note: Hard to imagine > 64K waiting threads so this could be 16-bit,
43 // but there is no nice 16-bit _FORMAT support.
44 uint ThreadsSMRSupport::_smr_delete_lock_wait_cnt = 0;
45
46 // Max # of parallel threads in _smr_delete_lock->wait().
47 // Impl note: See _smr_delete_lock_wait_cnt note.
48 uint ThreadsSMRSupport::_smr_delete_lock_wait_max = 0;
49
50 // Flag to indicate when an _smr_delete_lock->notify() is needed.
51 // Impl note: See _smr_delete_lock_wait_cnt note.
52 volatile uint ThreadsSMRSupport::_smr_delete_notify = 0;
53
54 // # of threads deleted over VM lifetime.
55 // Impl note: Atomically incremented over VM lifetime so use unsigned for more
56 // range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
57 // isn't available everywhere (or is it?).
58 volatile uint ThreadsSMRSupport::_smr_deleted_thread_cnt = 0;
59
60 // Max time in millis to delete a thread.
61 // Impl note: 16-bit might be too small on an overloaded machine. Use
62 // unsigned since this is a time value. Set via Atomic::cmpxchg() in a
63 // loop for correctness.
64 volatile uint ThreadsSMRSupport::_smr_deleted_thread_time_max = 0;
65
66 // Cumulative time in millis to delete threads.
67 // Impl note: Atomically added to over VM lifetime so use unsigned for more
68 // range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
69 // isn't available everywhere (or is it?).
70 volatile uint ThreadsSMRSupport::_smr_deleted_thread_times = 0;
71
// The current system-wide ThreadsList snapshot; swapped via
// xchg_smr_java_thread_list() on every thread add/remove.
72 ThreadsList* volatile ThreadsSMRSupport::_smr_java_thread_list = new ThreadsList(0);
73
74 // # of ThreadsLists allocated over VM lifetime.
75 // Impl note: We allocate a new ThreadsList for every thread create and
76 // every thread delete so we need a bigger type than the
77 // _smr_deleted_thread_cnt field.
78 uint64_t ThreadsSMRSupport::_smr_java_thread_list_alloc_cnt = 1;
79
80 // # of ThreadsLists freed over VM lifetime.
81 // Impl note: See _smr_java_thread_list_alloc_cnt note.
82 uint64_t ThreadsSMRSupport::_smr_java_thread_list_free_cnt = 0;
83
84 // Max size ThreadsList allocated.
85 // Impl note: Max # of threads alive at one time should fit in unsigned 32-bit.
86 uint ThreadsSMRSupport::_smr_java_thread_list_max = 0;
87
88 // Max # of nested ThreadsLists for a thread.
89 // Impl note: Hard to imagine > 64K nested ThreadsLists so this could be
90 // 16-bit, but there is no nice 16-bit _FORMAT support.
91 uint ThreadsSMRSupport::_smr_nested_thread_list_max = 0;
92
93 // # of ThreadsListHandles deleted over VM lifetime.
94 // Impl note: Atomically incremented over VM lifetime so use unsigned for
95 // more range. There will be fewer ThreadsListHandles than threads so
96 // unsigned 32-bit should be fine.
97 volatile uint ThreadsSMRSupport::_smr_tlh_cnt = 0;
98
99 // Max time in millis to delete a ThreadsListHandle.
100 // Impl note: 16-bit might be too small on an overloaded machine. Use
101 // unsigned since this is a time value. Set via Atomic::cmpxchg() in a
102 // loop for correctness.
103 volatile uint ThreadsSMRSupport::_smr_tlh_time_max = 0;
104
105 // Cumulative time in millis to delete ThreadsListHandles.
106 // Impl note: Atomically added to over VM lifetime so use unsigned for more
107 // range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
108 // isn't available everywhere (or is it?).
109 volatile uint ThreadsSMRSupport::_smr_tlh_times = 0;
110
// Linked list of retired ThreadsList snapshots waiting to be freed by
// smr_free_list() once no hazard ptr references them.
111 ThreadsList* ThreadsSMRSupport::_smr_to_delete_list = NULL;
112
113 // # of parallel ThreadsLists on the to-delete list.
114 // Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so
115 // this could be 16-bit, but there is no nice 16-bit _FORMAT support.
116 uint ThreadsSMRSupport::_smr_to_delete_list_cnt = 0;
117
118 // Max # of parallel ThreadsLists on the to-delete list.
119 // Impl note: See _smr_to_delete_list_cnt note.
120 uint ThreadsSMRSupport::_smr_to_delete_list_max = 0;
121
122
123 // 'inline' functions first so the definitions are before first use:
124
125 inline void ThreadsSMRSupport::add_smr_deleted_thread_times(uint add_value) {
// Atomically accumulate 'add_value' (millis) into the cumulative
// thread-delete time statistic.
126 Atomic::add(add_value, &_smr_deleted_thread_times);
127 }
128
129 inline void ThreadsSMRSupport::inc_smr_deleted_thread_cnt() {
// Atomically bump the count of JavaThreads deleted over the VM lifetime.
130 Atomic::inc(&_smr_deleted_thread_cnt);
131 }
132
133 inline void ThreadsSMRSupport::inc_smr_java_thread_list_alloc_cnt() {
// Plain (non-atomic) increment; presumably callers are serialized
// (e.g. by Threads_lock, cf. the assert in smr_free_list()) -- TODO confirm.
134 _smr_java_thread_list_alloc_cnt++;
135 }
136
137 inline void ThreadsSMRSupport::update_smr_deleted_thread_time_max(uint new_value) {
138 while (true) {
139 uint cur_value = _smr_deleted_thread_time_max;
140 if (new_value <= cur_value) {
141 // No need to update max value so we're done.
142 break;
143 }
144 if (Atomic::cmpxchg(new_value, &_smr_deleted_thread_time_max, cur_value) == cur_value) {
145 // Updated max value so we're done. Otherwise try it all again.
146 break;
147 }
148 }
149 }
150
151 inline void ThreadsSMRSupport::update_smr_java_thread_list_max(uint new_value) {
152 if (new_value > _smr_java_thread_list_max) {
153 _smr_java_thread_list_max = new_value;
154 }
155 }
156
157 inline ThreadsList* ThreadsSMRSupport::xchg_smr_java_thread_list(ThreadsList* new_list) {
// Atomically publish 'new_list' as the system-wide ThreadsList and
// return the previous list so the caller can retire it (smr_free_list).
158 return (ThreadsList*)Atomic::xchg(new_list, &_smr_java_thread_list);
159 }
160
161
162 // Hash table of pointers found by a scan. Used for collecting hazard
163 // pointers (ThreadsList references). Also used for collecting JavaThreads
164 // that are indirectly referenced by hazard ptrs. An instance of this
165 // class only contains one type of pointer.
166 //
167 class ThreadScanHashtable : public CHeapObj<mtThread> {
168 private:
169 static bool ptr_equals(void * const& s1, void * const& s2) {
170 return s1 == s2;
171 }
172
173 static unsigned int ptr_hash(void * const& s1) {
174 // 2654435761 = 2^32 * Phi (golden ratio)
175 return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
176 }
177
178 int _table_size;
251 assert(thread->get_nested_threads_hazard_ptr() == NULL,
252 "cannot have a nested hazard ptr with a NULL regular hazard ptr");
253 return;
254 }
255
256 // If the hazard ptr is verified as stable (since it is not tagged),
257 // then it is safe to use.
258 if (!Thread::is_hazard_ptr_tagged(current_list)) break;
259
260 // The hazard ptr is tagged as not yet verified as being stable
261 // so we are racing with acquire_stable_list(). This exchange
262 // attempts to invalidate the hazard ptr. If we win the race,
263 // then we can ignore this unstable hazard ptr and the other
264 // thread will retry the attempt to publish a stable hazard ptr.
265 // If we lose the race, then we retry our attempt to look at the
266 // hazard ptr.
267 if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return;
268 }
269
270 // The current JavaThread has a hazard ptr (ThreadsList reference)
271 // which might be _smr_java_thread_list or it might be an older
272 // ThreadsList that has been removed but not freed. In either case,
273 // the hazard ptr is protecting all the JavaThreads on that
274 // ThreadsList.
275 AddThreadHazardPointerThreadClosure add_cl(_table);
276 current_list->threads_do(&add_cl);
277
278 // Any NestedThreadsLists are also protecting JavaThreads so
279 // gather those also; the ThreadsLists may be different.
280 for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
281 node != NULL; node = node->next()) {
282 node->t_list()->threads_do(&add_cl);
283 }
284 }
285 };
286
287 // Closure to gather hazard ptrs (ThreadsList references) into a hash table.
288 //
289 class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure {
290 private:
291 ThreadScanHashtable *_table;
330 class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
331 private:
332 JavaThread *_thread;
333 public:
334 ScanHazardPtrPrintMatchingThreadsClosure(JavaThread *thread) : _thread(thread) {}
335
336 virtual void do_thread(Thread *thread) {
337 assert_locked_or_safepoint(Threads_lock);
338
339 if (thread == NULL) return;
340 ThreadsList *current_list = thread->get_threads_hazard_ptr();
341 if (current_list == NULL) {
342 assert(thread->get_nested_threads_hazard_ptr() == NULL,
343 "cannot have a nested hazard ptr with a NULL regular hazard ptr");
344 return;
345 }
346 // If the hazard ptr is unverified, then ignore it.
347 if (Thread::is_hazard_ptr_tagged(current_list)) return;
348
349 // The current JavaThread has a hazard ptr (ThreadsList reference)
350 // which might be _smr_java_thread_list or it might be an older
351 // ThreadsList that has been removed but not freed. In either case,
352 // the hazard ptr is protecting all the JavaThreads on that
353 // ThreadsList, but we only care about matching a specific JavaThread.
354 JavaThreadIterator jti(current_list);
355 for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
356 if (p == _thread) {
357 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
358 break;
359 }
360 }
361
362 // Any NestedThreadsLists are also protecting JavaThreads so
363 // check those also; the ThreadsLists may be different.
364 for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
365 node != NULL; node = node->next()) {
366 JavaThreadIterator jti(node->t_list());
367 for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
368 if (p == _thread) {
369 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a nested hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
370 return;
459 }
460 if (tail_length > 0) {
461 Copy::disjoint_words((HeapWord*)list->_threads + index + 1, (HeapWord*)new_list->_threads + index, tail_length);
462 }
463
464 return new_list;
465 }
466
467 ThreadsListHandle::ThreadsListHandle(Thread *self) : _list(ThreadsSMRSupport::acquire_stable_list(self, /* is_ThreadsListSetter */ false)), _self(self) {
// Acquires a stable ThreadsList (hazard ptr) for 'self'; the matching
// release happens in the destructor. Optionally starts the TLH timer.
468 assert(self == Thread::current(), "sanity check");
469 if (EnableThreadSMRStatistics) {
470 _timer.start();
471 }
472 }
473
474 ThreadsListHandle::~ThreadsListHandle() {
475 ThreadsSMRSupport::release_stable_list(_self);
476 if (EnableThreadSMRStatistics) {
477 _timer.stop();
478 uint millis = (uint)_timer.milliseconds();
479 ThreadsSMRSupport::update_smr_tlh_stats(millis);
480 }
481 }
482
483 // Convert an internal thread reference to a JavaThread found on the
484 // associated ThreadsList. This ThreadsListHandle "protects" the
485 // returned JavaThread *.
486 //
487 // If thread_oop_p is not NULL, then the caller wants to use the oop
488 // after this call so the oop is returned. On success, *jt_pp is set
489 // to the converted JavaThread * and true is returned. On error,
490 // returns false.
491 //
492 bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread,
493 JavaThread ** jt_pp,
494 oop * thread_oop_p) {
495 assert(this->list() != NULL, "must have a ThreadsList");
496 assert(jt_pp != NULL, "must have a return JavaThread pointer");
497 // thread_oop_p is optional so no assert()
498
499 // The JVM_* interfaces don't allow a NULL thread parameter; JVM/TI
557 if (self->get_threads_hazard_ptr() == NULL) {
558 // The typical case is first.
559 return acquire_stable_list_fast_path(self);
560 }
561
562 // The nested case is rare.
563 return acquire_stable_list_nested_path(self);
564 }
565
566 // Fast path (and lock free) way to acquire a stable ThreadsList.
567 //
568 ThreadsList *ThreadsSMRSupport::acquire_stable_list_fast_path(Thread *self) {
569 assert(self != NULL, "sanity check");
570 assert(self->get_threads_hazard_ptr() == NULL, "sanity check");
571 assert(self->get_nested_threads_hazard_ptr() == NULL,
572 "cannot have a nested hazard ptr with a NULL regular hazard ptr");
573
574 ThreadsList* threads;
575
576 // Stable recording of a hazard ptr for SMR. This code does not use
577 // locks so its use of the _smr_java_thread_list & _threads_hazard_ptr
578 // fields is racy relative to code that uses those fields with locks.
579 // OrderAccess and Atomic functions are used to deal with those races.
580 //
581 while (true) {
582 threads = get_smr_java_thread_list();
583
584 // Publish a tagged hazard ptr to denote that the hazard ptr is not
585 // yet verified as being stable. Due to the fence after the hazard
586 // ptr write, it will be sequentially consistent w.r.t. the
587 // sequentially consistent writes of the ThreadsList, even on
588 // non-multiple copy atomic machines where stores can be observed
589 // in different order from different observer threads.
590 ThreadsList* unverified_threads = Thread::tag_hazard_ptr(threads);
591 self->set_threads_hazard_ptr(unverified_threads);
592
593 // If _smr_java_thread_list has changed, we have lost a race with
594 // Threads::add() or Threads::remove() and have to try again.
595 if (get_smr_java_thread_list() != threads) {
596 continue;
597 }
598
599 // We try to remove the tag which will verify the hazard ptr as
600 // being stable. This exchange can race with a scanning thread
601 // which might invalidate the tagged hazard ptr to keep it from
602 // being followed to access JavaThread ptrs. If we lose the race,
603 // we simply retry. If we win the race, then the stable hazard
604 // ptr is officially published.
605 if (self->cmpxchg_threads_hazard_ptr(threads, unverified_threads) == unverified_threads) {
606 break;
607 }
608 }
609
610 // A stable hazard ptr has been published letting other threads know
611 // that the ThreadsList and the JavaThreads reachable from this list
612 // are protected and hence they should not be deleted until everyone
613 // agrees it is safe to do so.
614
615 return threads;
617
618 // Acquire a nested stable ThreadsList; this is rare so it uses
619 // Threads_lock.
620 //
621 ThreadsList *ThreadsSMRSupport::acquire_stable_list_nested_path(Thread *self) {
622 assert(self != NULL, "sanity check");
623 assert(self->get_threads_hazard_ptr() != NULL,
624 "cannot have a NULL regular hazard ptr when acquiring a nested hazard ptr");
625
626 // The thread already has a hazard ptr (ThreadsList ref) so we need
627 // to create a nested ThreadsListHandle with the current ThreadsList
628 // since it might be different than our current hazard ptr. The need
629 // for a nested ThreadsListHandle is rare so we do this while holding
630 // the Threads_lock so we don't race with the scanning code; the code
631 // is so much simpler this way.
632
633 NestedThreadsList* node;
634 {
635 // Only grab the Threads_lock if we don't already own it.
636 MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
637 node = new NestedThreadsList(get_smr_java_thread_list());
638 // We insert at the front of the list to match up with the delete
639 // in release_stable_list().
640 node->set_next(self->get_nested_threads_hazard_ptr());
// Publishing the node under Threads_lock means the scanning closures
// (which also walk nested hazard ptr lists) see a consistent list.
641 self->set_nested_threads_hazard_ptr(node);
642 if (EnableThreadSMRStatistics) {
643 self->inc_nested_threads_hazard_ptr_cnt();
644 if (self->nested_threads_hazard_ptr_cnt() > _smr_nested_thread_list_max) {
645 _smr_nested_thread_list_max = self->nested_threads_hazard_ptr_cnt();
646 }
647 }
648 }
649 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::acquire_stable_list: add NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));
650
651 return node->t_list();
652 }
653
654 void ThreadsSMRSupport::add_thread(JavaThread *thread){
655 ThreadsList *new_list = ThreadsList::add_thread(ThreadsSMRSupport::get_smr_java_thread_list(), thread);
656 if (EnableThreadSMRStatistics) {
657 ThreadsSMRSupport::inc_smr_java_thread_list_alloc_cnt();
658 ThreadsSMRSupport::update_smr_java_thread_list_max(new_list->length());
659 }
660 // Initial _smr_java_thread_list will not generate a "Threads::add" mesg.
661 log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::add: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
662
663 ThreadsList *old_list = ThreadsSMRSupport::xchg_smr_java_thread_list(new_list);
664 ThreadsSMRSupport::smr_free_list(old_list);
665 }
666
667 // set_smr_delete_notify() and clear_smr_delete_notify() are called
668 // under the protection of the smr_delete_lock, but we also use an
669 // Atomic operation to ensure the memory update is seen earlier than
670 // when the smr_delete_lock is dropped.
671 //
672 void ThreadsSMRSupport::clear_smr_delete_notify() {
// This decrement pairs with the increment in set_smr_delete_notify();
// both are done by smr_delete() while holding smr_delete_lock.
673 Atomic::dec(&_smr_delete_notify);
674 }
675
676 // Return true if the specified JavaThread is protected by a hazard
677 // pointer (ThreadsList reference). Otherwise, returns false.
678 //
679 bool ThreadsSMRSupport::is_a_protected_JavaThread(JavaThread *thread) {
680 assert_locked_or_safepoint(Threads_lock);
681
682 // Hash table size should be first power of two higher than twice
683 // the length of the Threads list.
684 int hash_table_size = MIN2((int)get_smr_java_thread_list()->length(), 32) << 1;
685 hash_table_size--;
686 hash_table_size |= hash_table_size >> 1;
687 hash_table_size |= hash_table_size >> 2;
688 hash_table_size |= hash_table_size >> 4;
689 hash_table_size |= hash_table_size >> 8;
690 hash_table_size |= hash_table_size >> 16;
691 hash_table_size++;
692
693 // Gather a hash table of the JavaThreads indirectly referenced by
694 // hazard ptrs.
695 ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
696 ScanHazardPtrGatherProtectedThreadsClosure scan_cl(scan_table);
697 Threads::threads_do(&scan_cl);
698
699 bool thread_is_protected = false;
700 if (scan_table->has_entry((void*)thread)) {
701 thread_is_protected = true;
702 }
703 delete scan_table;
704 return thread_is_protected;
719 }
720
721 // The nested case is rare.
722 release_stable_list_nested_path(self);
723 }
724
725 // Fast path way to release a stable ThreadsList. The release portion
726 // is lock-free, but the wake up portion is not.
727 //
728 void ThreadsSMRSupport::release_stable_list_fast_path(Thread *self) {
729 assert(self != NULL, "sanity check")*;
730 assert(self->get_threads_hazard_ptr() != NULL, "sanity check");
731 assert(self->get_nested_threads_hazard_ptr() == NULL,
732 "cannot have a nested hazard ptr when releasing a regular hazard ptr");
733
734 // After releasing the hazard ptr, other threads may go ahead and
735 // free up some memory temporarily used by a ThreadsList snapshot.
736 self->set_threads_hazard_ptr(NULL);
737
738 // We use double-check locking to reduce traffic on the system
739 // wide smr_delete_lock.
// First (lock-free) check: smr_delete_notify() reads _smr_delete_notify
// with load_acquire; the second check happens under smr_delete_lock in
// release_stable_list_wake_up().
740 if (ThreadsSMRSupport::smr_delete_notify()) {
741 // An exiting thread might be waiting in smr_delete(); we need to
742 // check with smr_delete_lock to be sure.
743 release_stable_list_wake_up((char *) "regular hazard ptr");
744 }
745 }
746
747 // Release a nested stable ThreadsList; this is rare so it uses
748 // Threads_lock.
749 //
750 void ThreadsSMRSupport::release_stable_list_nested_path(Thread *self) {
751 assert(self != NULL, "sanity check");
752 assert(self->get_nested_threads_hazard_ptr() != NULL, "sanity check");
753 assert(self->get_threads_hazard_ptr() != NULL,
754 "must have a regular hazard ptr to have nested hazard ptrs");
755
756 // We have a nested ThreadsListHandle so we have to release it first.
757 // The need for a nested ThreadsListHandle is rare so we do this while
758 // holding the Threads_lock so we don't race with the scanning code;
759 // the code is so much simpler this way.
760
761 NestedThreadsList *node;
762 {
763 // Only grab the Threads_lock if we don't already own it.
764 MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
765 // We remove from the front of the list to match up with the insert
766 // in acquire_stable_list().
767 node = self->get_nested_threads_hazard_ptr();
768 self->set_nested_threads_hazard_ptr(node->next());
769 if (EnableThreadSMRStatistics) {
770 self->dec_nested_threads_hazard_ptr_cnt();
771 }
772 }
773
// The node was unlinked under Threads_lock, so the scanning code can
// no longer reach it; it is only deleted after the wake up below.
774 // An exiting thread might be waiting in smr_delete(); we need to
775 // check with smr_delete_lock to be sure.
776 release_stable_list_wake_up((char *) "nested hazard ptr");
777
778 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list: delete NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));
779
780 delete node;
781 }
782
783 // Wake up portion of the release stable ThreadsList protocol;
784 // uses the smr_delete_lock().
785 //
786 void ThreadsSMRSupport::release_stable_list_wake_up(char *log_str) {
787 assert(log_str != NULL, "sanity check");
788
789 // Note: smr_delete_lock is held in smr_delete() for the entire
790 // hazard ptr search so that we do not lose this notify() if
791 // the exiting thread has to wait. That code path also holds
792 // Threads_lock (which was grabbed before smr_delete_lock) so that
793 // threads_do() can be called. This means the system can't start a
794 // safepoint which means this thread can't take too long to get to
795 // a safepoint because of being blocked on smr_delete_lock.
796 //
797 MonitorLockerEx ml(ThreadsSMRSupport::smr_delete_lock(), Monitor::_no_safepoint_check_flag);
798 if (ThreadsSMRSupport::smr_delete_notify()) {
799 // Notify any exiting JavaThreads that are waiting in smr_delete()
800 // that we've released a ThreadsList.
801 ml.notify_all();
802 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list notified %s", os::current_thread_id(), log_str);
803 }
804 }
805
806 void ThreadsSMRSupport::remove_thread(JavaThread *thread) {
807 ThreadsList *new_list = ThreadsList::remove_thread(ThreadsSMRSupport::get_smr_java_thread_list(), thread);
808 if (EnableThreadSMRStatistics) {
809 ThreadsSMRSupport::inc_smr_java_thread_list_alloc_cnt();
810 // This list is smaller so no need to check for a "longest" update.
811 }
812
813 // Final _smr_java_thread_list will not generate a "Threads::remove" mesg.
814 log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::remove: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
815
816 ThreadsList *old_list = ThreadsSMRSupport::xchg_smr_java_thread_list(new_list);
817 ThreadsSMRSupport::smr_free_list(old_list);
818 }
819
820 // See note for clear_smr_delete_notify().
821 //
822 void ThreadsSMRSupport::set_smr_delete_notify() {
// Incremented by smr_delete() after smr_delete_lock is grabbed and
// before hazard ptrs are scanned (double-check locking protocol with
// release_stable_list()).
823 Atomic::inc(&_smr_delete_notify);
824 }
825
826 // Safely delete a JavaThread when it is no longer in use by a
827 // ThreadsListHandle.
828 //
829 void ThreadsSMRSupport::smr_delete(JavaThread *thread) {
830 assert(!Threads_lock->owned_by_self(), "sanity");
831
832 bool has_logged_once = false;
833 elapsedTimer timer;
834 if (EnableThreadSMRStatistics) {
835 timer.start();
836 }
837
// Retry loop: each iteration grabs Threads_lock and smr_delete_lock,
// scans all hazard ptrs, and exits only when no hazard ptr protects
// 'thread'. Otherwise it drops Threads_lock and waits for a
// release_stable_list() notification before rescanning.
838 while (true) {
839 {
840 // No safepoint check because this JavaThread is not on the
841 // Threads list.
842 MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
843 // Cannot use a MonitorLockerEx helper here because we have
844 // to drop the Threads_lock first if we wait.
// Lock order: Threads_lock is always grabbed before smr_delete_lock
// (see the note in release_stable_list_wake_up()).
845 ThreadsSMRSupport::smr_delete_lock()->lock_without_safepoint_check();
846 // Set the smr_delete_notify flag after we grab smr_delete_lock
847 // and before we scan hazard ptrs because we're doing
848 // double-check locking in release_stable_list().
849 ThreadsSMRSupport::set_smr_delete_notify();
850
851 if (!is_a_protected_JavaThread(thread)) {
852 // This is the common case.
853 ThreadsSMRSupport::clear_smr_delete_notify();
854 ThreadsSMRSupport::smr_delete_lock()->unlock();
855 break;
856 }
857 if (!has_logged_once) {
858 has_logged_once = true;
859 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread=" INTPTR_FORMAT " is not deleted.", os::current_thread_id(), p2i(thread));
860 if (log_is_enabled(Debug, os, thread)) {
861 ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread);
862 Threads::threads_do(&scan_cl);
863 }
864 }
865 } // We have to drop the Threads_lock to wait or delete the thread
866
867 if (EnableThreadSMRStatistics) {
868 _smr_delete_lock_wait_cnt++;
869 if (_smr_delete_lock_wait_cnt > _smr_delete_lock_wait_max) {
870 _smr_delete_lock_wait_max = _smr_delete_lock_wait_cnt;
871 }
872 }
873 // Wait for a release_stable_list() call before we check again. No
874 // safepoint check, no timeout, and not as suspend equivalent flag
875 // because this JavaThread is not on the Threads list.
876 ThreadsSMRSupport::smr_delete_lock()->wait(Mutex::_no_safepoint_check_flag, 0,
877 !Mutex::_as_suspend_equivalent_flag);
878 if (EnableThreadSMRStatistics) {
879 _smr_delete_lock_wait_cnt--;
880 }
881
882 ThreadsSMRSupport::clear_smr_delete_notify();
883 ThreadsSMRSupport::smr_delete_lock()->unlock();
884 // Retry the whole scenario.
885 }
886
887 if (ThreadLocalHandshakes) {
888 // The thread is about to be deleted so cancel any handshake.
889 thread->cancel_handshake();
890 }
891
// No hazard ptr protects 'thread' any longer, so it is now safe to
// reclaim it.
892 delete thread;
893 if (EnableThreadSMRStatistics) {
894 timer.stop();
895 uint millis = (uint)timer.milliseconds();
896 ThreadsSMRSupport::inc_smr_deleted_thread_cnt();
897 ThreadsSMRSupport::add_smr_deleted_thread_times(millis);
898 ThreadsSMRSupport::update_smr_deleted_thread_time_max(millis);
899 }
900
901 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread=" INTPTR_FORMAT " is deleted.", os::current_thread_id(), p2i(thread));
902 }
903
904 bool ThreadsSMRSupport::smr_delete_notify() {
905 // Use load_acquire() in order to see any updates to _smr_delete_notify
906 // earlier than when smr_delete_lock is grabbed.
// A racy (lock-free) read is fine here: callers use this as the first
// half of the double-check locking in release_stable_list().
907 return (OrderAccess::load_acquire(&_smr_delete_notify) != 0);
908 }
909
910 // Safely free a ThreadsList after a Threads::add() or Threads::remove().
911 // The specified ThreadsList may not get deleted during this call if it
912 // is still in-use (referenced by a hazard ptr). Other ThreadsLists
913 // in the chain may get deleted by this call if they are no longer in-use.
914 void ThreadsSMRSupport::smr_free_list(ThreadsList* threads) {
915 assert_locked_or_safepoint(Threads_lock);
916
// Prepend 'threads' to the to-delete list; it is only actually freed
// below if no hazard ptr references it.
917 threads->set_next_list(_smr_to_delete_list);
918 _smr_to_delete_list = threads;
919 if (EnableThreadSMRStatistics) {
920 _smr_to_delete_list_cnt++;
921 if (_smr_to_delete_list_cnt > _smr_to_delete_list_max) {
922 _smr_to_delete_list_max = _smr_to_delete_list_cnt;
923 }
924 }
925
926 // Hash table size should be first power of two higher than twice the length of the ThreadsList
927 int hash_table_size = MIN2((int)get_smr_java_thread_list()->length(), 32) << 1;
// Round up to the next power of two by smearing the top bit rightward.
928 hash_table_size--;
929 hash_table_size |= hash_table_size >> 1;
930 hash_table_size |= hash_table_size >> 2;
931 hash_table_size |= hash_table_size >> 4;
932 hash_table_size |= hash_table_size >> 8;
933 hash_table_size |= hash_table_size >> 16;
934 hash_table_size++;
935
936 // Gather a hash table of the current hazard ptrs:
937 ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
938 ScanHazardPtrGatherThreadsListClosure scan_cl(scan_table);
939 Threads::threads_do(&scan_cl);
940
941 // Walk through the linked list of pending freeable ThreadsLists
942 // and free the ones that are not referenced from hazard ptrs.
943 ThreadsList* current = _smr_to_delete_list;
944 ThreadsList* prev = NULL;
945 ThreadsList* next = NULL;
946 bool threads_is_freed = false;
947 while (current != NULL) {
948 next = current->next_list();
949 if (!scan_table->has_entry((void*)current)) {
950 // This ThreadsList is not referenced by a hazard ptr.
// Unlink 'current' from the to-delete list (both interior and head
// cases) before deleting it.
951 if (prev != NULL) {
952 prev->set_next_list(next);
953 }
954 if (_smr_to_delete_list == current) {
955 _smr_to_delete_list = next;
956 }
957
958 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_free_list: threads=" INTPTR_FORMAT " is freed.", os::current_thread_id(), p2i(current));
959 if (current == threads) threads_is_freed = true;
960 delete current;
961 if (EnableThreadSMRStatistics) {
962 _smr_java_thread_list_free_cnt++;
963 _smr_to_delete_list_cnt--;
964 }
965 } else {
966 prev = current;
967 }
968 current = next;
969 }
970
971 if (!threads_is_freed) {
972 // Only report "is not freed" on the original call to
973 // smr_free_list() for this ThreadsList.
974 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_free_list: threads=" INTPTR_FORMAT " is not freed.", os::current_thread_id(), p2i(threads));
975 }
976
977 delete scan_table;
978 }
979
980
981 // Debug, logging, and printing stuff at the end:
982
983 // Log Threads class SMR info.
984 void ThreadsSMRSupport::log_smr_statistics() {
985 LogTarget(Info, thread, smr) log;
986 if (log.is_enabled()) {
987 LogStream out(log);
988 print_smr_info_on(&out);
989 }
990 }
991
992 // Print Threads class SMR info.
993 void ThreadsSMRSupport::print_smr_info_on(outputStream* st) {
994 // Only grab the Threads_lock if we don't already own it
995 // and if we are not reporting an error.
996 MutexLockerEx ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);
997
// NOTE(review): on the error-reporting path the lock is skipped, so
// the fields below may be read racily -- acceptable for diagnostics.
998 st->print_cr("Threads class SMR info:");
999 st->print_cr("_smr_java_thread_list=" INTPTR_FORMAT ", length=%u, "
1000 "elements={", p2i(_smr_java_thread_list),
1001 _smr_java_thread_list->length());
1002 print_smr_info_elements_on(st, _smr_java_thread_list);
1003 st->print_cr("}");
1004 if (_smr_to_delete_list != NULL) {
1005 st->print_cr("_smr_to_delete_list=" INTPTR_FORMAT ", length=%u, "
1006 "elements={", p2i(_smr_to_delete_list),
1007 _smr_to_delete_list->length());
1008 print_smr_info_elements_on(st, _smr_to_delete_list);
1009 st->print_cr("}");
1010 for (ThreadsList *t_list = _smr_to_delete_list->next_list();
1011 t_list != NULL; t_list = t_list->next_list()) {
1012 st->print("next-> " INTPTR_FORMAT ", length=%u, "
1013 "elements={", p2i(t_list), t_list->length());
1014 print_smr_info_elements_on(st, t_list);
1015 st->print_cr("}");
1016 }
1017 }
1018 if (!EnableThreadSMRStatistics) {
1019 return;
1020 }
1021 st->print_cr("_smr_java_thread_list_alloc_cnt=" UINT64_FORMAT ","
1022 "_smr_java_thread_list_free_cnt=" UINT64_FORMAT ","
1023 "_smr_java_thread_list_max=%u, "
1024 "_smr_nested_thread_list_max=%u",
1025 _smr_java_thread_list_alloc_cnt,
1026 _smr_java_thread_list_free_cnt,
1027 _smr_java_thread_list_max,
1028 _smr_nested_thread_list_max);
1029 if (_smr_tlh_cnt > 0) {
1030 st->print_cr("_smr_tlh_cnt=%u"
1031 ", _smr_tlh_times=%u"
1032 ", avg_smr_tlh_time=%0.2f"
1033 ", _smr_tlh_time_max=%u",
1034 _smr_tlh_cnt, _smr_tlh_times,
1035 ((double) _smr_tlh_times / _smr_tlh_cnt),
1036 _smr_tlh_time_max);
1037 }
1038 if (_smr_deleted_thread_cnt > 0) {
1039 st->print_cr("_smr_deleted_thread_cnt=%u"
1040 ", _smr_deleted_thread_times=%u"
1041 ", avg_smr_deleted_thread_time=%0.2f"
1042 ", _smr_deleted_thread_time_max=%u",
1043 _smr_deleted_thread_cnt, _smr_deleted_thread_times,
1044 ((double) _smr_deleted_thread_times / _smr_deleted_thread_cnt),
1045 _smr_deleted_thread_time_max);
1046 }
1047 st->print_cr("_smr_delete_lock_wait_cnt=%u, _smr_delete_lock_wait_max=%u",
1048 _smr_delete_lock_wait_cnt, _smr_delete_lock_wait_max);
1049 st->print_cr("_smr_to_delete_list_cnt=%u, _smr_to_delete_list_max=%u",
1050 _smr_to_delete_list_cnt, _smr_to_delete_list_max);
1051 }
1052
// Print ThreadsList elements (4 per line).
// Walks 't_list' and prints each JavaThread pointer, wrapping the output
// every four elements so a full row fits in an 80 column line.
void ThreadsSMRSupport::print_smr_info_elements_on(outputStream* st,
                                                   ThreadsList* t_list) {
  uint cnt = 0;
  JavaThreadIterator jti(t_list);
  for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
    st->print(INTPTR_FORMAT, p2i(jt));
    // NOTE(review): length() is unsigned; the loop body only runs when
    // length >= 1 so 'length() - 1' cannot underflow here.
    if (cnt < t_list->length() - 1) {
      // Separate with comma or comma-space except for the last one.
      if (((cnt + 1) % 4) == 0) {
        // Four INTPTR_FORMAT fit on an 80 column line so end the
        // current line with just a comma.
        st->print_cr(",");
      } else {
        // Not the last one on the current line so use comma-space:
        st->print(", ");
      }
    } else {
      // Last one so just end the current line.
      st->cr();
    }
    cnt++;
  }
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/logStream.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "runtime/thread.inline.hpp"
29 #include "runtime/threadSMR.inline.hpp"
30 #include "services/threadService.hpp"
31 #include "utilities/globalDefinitions.hpp"
32 #include "utilities/resourceHash.hpp"
33
// Lock protecting the Thread-SMR delete protocol; created eagerly at
// static-initialization time so it is available before VM init completes.
Monitor* ThreadsSMRSupport::_delete_lock =
             new Monitor(Monitor::special, "Thread_SMR_delete_lock",
                         false /* allow_vm_block */,
                         Monitor::_safepoint_check_never);
// The '_cnt', '_max' and '_times' fields are enabled via
// -XX:+EnableThreadSMRStatistics:

// # of parallel threads in _delete_lock->wait().
// Impl note: Hard to imagine > 64K waiting threads so this could be 16-bit,
// but there is no nice 16-bit _FORMAT support.
uint ThreadsSMRSupport::_delete_lock_wait_cnt = 0;

// Max # of parallel threads in _delete_lock->wait().
// Impl note: See _delete_lock_wait_cnt note.
uint ThreadsSMRSupport::_delete_lock_wait_max = 0;

// Flag to indicate when an _delete_lock->notify() is needed.
// Impl note: See _delete_lock_wait_cnt note.
volatile uint ThreadsSMRSupport::_delete_notify = 0;

// # of threads deleted over VM lifetime.
// Impl note: Atomically incremented over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint ThreadsSMRSupport::_deleted_thread_cnt = 0;

// Max time in millis to delete a thread.
// Impl note: 16-bit might be too small on an overloaded machine. Use
// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
// loop for correctness.
volatile uint ThreadsSMRSupport::_deleted_thread_time_max = 0;

// Cumulative time in millis to delete threads.
// Impl note: Atomically added to over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint ThreadsSMRSupport::_deleted_thread_times = 0;

// The current system-wide list of all JavaThreads; statically initialized
// to an empty list so it is never NULL.
ThreadsList* volatile ThreadsSMRSupport::_java_thread_list = new ThreadsList(0);

// # of ThreadsLists allocated over VM lifetime.
// Impl note: We allocate a new ThreadsList for every thread create and
// every thread delete so we need a bigger type than the
// _deleted_thread_cnt field.
uint64_t ThreadsSMRSupport::_java_thread_list_alloc_cnt = 1;

// # of ThreadsLists freed over VM lifetime.
// Impl note: See _java_thread_list_alloc_cnt note.
uint64_t ThreadsSMRSupport::_java_thread_list_free_cnt = 0;

// Max size ThreadsList allocated.
// Impl note: Max # of threads alive at one time should fit in unsigned 32-bit.
uint ThreadsSMRSupport::_java_thread_list_max = 0;

// Max # of nested ThreadsLists for a thread.
// Impl note: Hard to imagine > 64K nested ThreadsLists so this could be
// 16-bit, but there is no nice 16-bit _FORMAT support.
uint ThreadsSMRSupport::_nested_thread_list_max = 0;

// # of ThreadsListHandles deleted over VM lifetime.
// Impl note: Atomically incremented over VM lifetime so use unsigned for
// more range. There will be fewer ThreadsListHandles than threads so
// unsigned 32-bit should be fine.
volatile uint ThreadsSMRSupport::_tlh_cnt = 0;

// Max time in millis to delete a ThreadsListHandle.
// Impl note: 16-bit might be too small on an overloaded machine. Use
// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
// loop for correctness.
volatile uint ThreadsSMRSupport::_tlh_time_max = 0;

// Cumulative time in millis to delete ThreadsListHandles.
// Impl note: Atomically added to over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint ThreadsSMRSupport::_tlh_times = 0;

// Head of the linked list of ThreadsLists awaiting deletion (possibly
// still referenced by hazard ptrs); NULL when the list is empty.
ThreadsList* ThreadsSMRSupport::_to_delete_list = NULL;

// # of parallel ThreadsLists on the to-delete list.
// Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so
// this could be 16-bit, but there is no nice 16-bit _FORMAT support.
uint ThreadsSMRSupport::_to_delete_list_cnt = 0;

// Max # of parallel ThreadsLists on the to-delete list.
// Impl note: See _to_delete_list_cnt note.
uint ThreadsSMRSupport::_to_delete_list_max = 0;
121
122
123 // 'inline' functions first so the definitions are before first use:
124
// Add 'add_value' millis to the cumulative thread-deletion time statistic.
// Atomic because multiple exiting threads can update this concurrently.
inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) {
  Atomic::add(add_value, &_deleted_thread_times);
}
128
// Count one more JavaThread deletion; atomic since exiting threads race here.
inline void ThreadsSMRSupport::inc_deleted_thread_cnt() {
  Atomic::inc(&_deleted_thread_cnt);
}
132
// Count one more ThreadsList allocation.
// NOTE(review): plain non-atomic increment (unlike _deleted_thread_cnt) —
// presumably callers are serialized (e.g. by Threads_lock); confirm.
inline void ThreadsSMRSupport::inc_java_thread_list_alloc_cnt() {
  _java_thread_list_alloc_cnt++;
}
136
// Raise _deleted_thread_time_max to 'new_value' if it is a new maximum.
// Lock-free: a cmpxchg retry loop ensures a racing larger value is never
// overwritten by a smaller one.
inline void ThreadsSMRSupport::update_deleted_thread_time_max(uint new_value) {
  while (true) {
    uint cur_value = _deleted_thread_time_max;
    if (new_value <= cur_value) {
      // No need to update max value so we're done.
      break;
    }
    // Only the winner of the race installs its value; a loser re-reads
    // the (possibly larger) current value and tries again.
    if (Atomic::cmpxchg(new_value, &_deleted_thread_time_max, cur_value) == cur_value) {
      // Updated max value so we're done. Otherwise try it all again.
      break;
    }
  }
}
150
151 inline void ThreadsSMRSupport::update_java_thread_list_max(uint new_value) {
152 if (new_value > _java_thread_list_max) {
153 _java_thread_list_max = new_value;
154 }
155 }
156
// Atomically publish 'new_list' as the current _java_thread_list and
// return the previous list; callers retire the old list via free_list().
inline ThreadsList* ThreadsSMRSupport::xchg_java_thread_list(ThreadsList* new_list) {
  return (ThreadsList*)Atomic::xchg(new_list, &_java_thread_list);
}
160
161
162 // Hash table of pointers found by a scan. Used for collecting hazard
163 // pointers (ThreadsList references). Also used for collecting JavaThreads
164 // that are indirectly referenced by hazard ptrs. An instance of this
165 // class only contains one type of pointer.
166 //
167 class ThreadScanHashtable : public CHeapObj<mtThread> {
168 private:
169 static bool ptr_equals(void * const& s1, void * const& s2) {
170 return s1 == s2;
171 }
172
173 static unsigned int ptr_hash(void * const& s1) {
174 // 2654435761 = 2^32 * Phi (golden ratio)
175 return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
176 }
177
178 int _table_size;
251 assert(thread->get_nested_threads_hazard_ptr() == NULL,
252 "cannot have a nested hazard ptr with a NULL regular hazard ptr");
253 return;
254 }
255
256 // If the hazard ptr is verified as stable (since it is not tagged),
257 // then it is safe to use.
258 if (!Thread::is_hazard_ptr_tagged(current_list)) break;
259
260 // The hazard ptr is tagged as not yet verified as being stable
261 // so we are racing with acquire_stable_list(). This exchange
262 // attempts to invalidate the hazard ptr. If we win the race,
263 // then we can ignore this unstable hazard ptr and the other
264 // thread will retry the attempt to publish a stable hazard ptr.
265 // If we lose the race, then we retry our attempt to look at the
266 // hazard ptr.
267 if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return;
268 }
269
270 // The current JavaThread has a hazard ptr (ThreadsList reference)
271 // which might be _java_thread_list or it might be an older
272 // ThreadsList that has been removed but not freed. In either case,
273 // the hazard ptr is protecting all the JavaThreads on that
274 // ThreadsList.
275 AddThreadHazardPointerThreadClosure add_cl(_table);
276 current_list->threads_do(&add_cl);
277
278 // Any NestedThreadsLists are also protecting JavaThreads so
279 // gather those also; the ThreadsLists may be different.
280 for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
281 node != NULL; node = node->next()) {
282 node->t_list()->threads_do(&add_cl);
283 }
284 }
285 };
286
287 // Closure to gather hazard ptrs (ThreadsList references) into a hash table.
288 //
289 class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure {
290 private:
291 ThreadScanHashtable *_table;
330 class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
331 private:
332 JavaThread *_thread;
333 public:
334 ScanHazardPtrPrintMatchingThreadsClosure(JavaThread *thread) : _thread(thread) {}
335
336 virtual void do_thread(Thread *thread) {
337 assert_locked_or_safepoint(Threads_lock);
338
339 if (thread == NULL) return;
340 ThreadsList *current_list = thread->get_threads_hazard_ptr();
341 if (current_list == NULL) {
342 assert(thread->get_nested_threads_hazard_ptr() == NULL,
343 "cannot have a nested hazard ptr with a NULL regular hazard ptr");
344 return;
345 }
346 // If the hazard ptr is unverified, then ignore it.
347 if (Thread::is_hazard_ptr_tagged(current_list)) return;
348
349 // The current JavaThread has a hazard ptr (ThreadsList reference)
350 // which might be _java_thread_list or it might be an older
351 // ThreadsList that has been removed but not freed. In either case,
352 // the hazard ptr is protecting all the JavaThreads on that
353 // ThreadsList, but we only care about matching a specific JavaThread.
354 JavaThreadIterator jti(current_list);
355 for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
356 if (p == _thread) {
357 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
358 break;
359 }
360 }
361
362 // Any NestedThreadsLists are also protecting JavaThreads so
363 // check those also; the ThreadsLists may be different.
364 for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
365 node != NULL; node = node->next()) {
366 JavaThreadIterator jti(node->t_list());
367 for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
368 if (p == _thread) {
369 log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a nested hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
370 return;
459 }
460 if (tail_length > 0) {
461 Copy::disjoint_words((HeapWord*)list->_threads + index + 1, (HeapWord*)new_list->_threads + index, tail_length);
462 }
463
464 return new_list;
465 }
466
// Acquire a stable ThreadsList snapshot (protected by a hazard ptr) for
// 'self', and start the handle-lifetime timer when statistics are enabled.
ThreadsListHandle::ThreadsListHandle(Thread *self) : _list(ThreadsSMRSupport::acquire_stable_list(self, /* is_ThreadsListSetter */ false)), _self(self) {
  assert(self == Thread::current(), "sanity check");
  if (EnableThreadSMRStatistics) {
    _timer.start();
  }
}
473
// Release the stable ThreadsList (drops the hazard ptr) and record how
// long this ThreadsListHandle was live when statistics are enabled.
ThreadsListHandle::~ThreadsListHandle() {
  ThreadsSMRSupport::release_stable_list(_self);
  if (EnableThreadSMRStatistics) {
    _timer.stop();
    uint millis = (uint)_timer.milliseconds();
    ThreadsSMRSupport::update_tlh_stats(millis);
  }
}
482
483 // Convert an internal thread reference to a JavaThread found on the
484 // associated ThreadsList. This ThreadsListHandle "protects" the
485 // returned JavaThread *.
486 //
487 // If thread_oop_p is not NULL, then the caller wants to use the oop
488 // after this call so the oop is returned. On success, *jt_pp is set
489 // to the converted JavaThread * and true is returned. On error,
490 // returns false.
491 //
492 bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread,
493 JavaThread ** jt_pp,
494 oop * thread_oop_p) {
495 assert(this->list() != NULL, "must have a ThreadsList");
496 assert(jt_pp != NULL, "must have a return JavaThread pointer");
497 // thread_oop_p is optional so no assert()
498
499 // The JVM_* interfaces don't allow a NULL thread parameter; JVM/TI
557 if (self->get_threads_hazard_ptr() == NULL) {
558 // The typical case is first.
559 return acquire_stable_list_fast_path(self);
560 }
561
562 // The nested case is rare.
563 return acquire_stable_list_nested_path(self);
564 }
565
// Fast path (and lock free) way to acquire a stable ThreadsList.
// Returns the verified-stable ThreadsList now protected by 'self's
// hazard ptr; the caller must later release it via release_stable_list().
//
ThreadsList *ThreadsSMRSupport::acquire_stable_list_fast_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() == NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() == NULL,
         "cannot have a nested hazard ptr with a NULL regular hazard ptr");

  ThreadsList* threads;

  // Stable recording of a hazard ptr for SMR. This code does not use
  // locks so its use of the _java_thread_list & _threads_hazard_ptr
  // fields is racy relative to code that uses those fields with locks.
  // OrderAccess and Atomic functions are used to deal with those races.
  //
  while (true) {
    threads = get_java_thread_list();

    // Publish a tagged hazard ptr to denote that the hazard ptr is not
    // yet verified as being stable. Due to the fence after the hazard
    // ptr write, it will be sequentially consistent w.r.t. the
    // sequentially consistent writes of the ThreadsList, even on
    // non-multiple copy atomic machines where stores can be observed
    // in different order from different observer threads.
    ThreadsList* unverified_threads = Thread::tag_hazard_ptr(threads);
    self->set_threads_hazard_ptr(unverified_threads);

    // If _java_thread_list has changed, we have lost a race with
    // Threads::add() or Threads::remove() and have to try again.
    if (get_java_thread_list() != threads) {
      continue;
    }

    // We try to remove the tag which will verify the hazard ptr as
    // being stable. This exchange can race with a scanning thread
    // which might invalidate the tagged hazard ptr to keep it from
    // being followed to access JavaThread ptrs. If we lose the race,
    // we simply retry. If we win the race, then the stable hazard
    // ptr is officially published.
    if (self->cmpxchg_threads_hazard_ptr(threads, unverified_threads) == unverified_threads) {
      break;
    }
  }

  // A stable hazard ptr has been published letting other threads know
  // that the ThreadsList and the JavaThreads reachable from this list
  // are protected and hence they should not be deleted until everyone
  // agrees it is safe to do so.

  return threads;
// Acquire a nested stable ThreadsList; this is rare so it uses
// Threads_lock. Returns the ThreadsList captured by the newly inserted
// NestedThreadsList node.
//
ThreadsList *ThreadsSMRSupport::acquire_stable_list_nested_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL,
         "cannot have a NULL regular hazard ptr when acquiring a nested hazard ptr");

  // The thread already has a hazard ptr (ThreadsList ref) so we need
  // to create a nested ThreadsListHandle with the current ThreadsList
  // since it might be different than our current hazard ptr. The need
  // for a nested ThreadsListHandle is rare so we do this while holding
  // the Threads_lock so we don't race with the scanning code; the code
  // is so much simpler this way.

  NestedThreadsList* node;
  {
    // Only grab the Threads_lock if we don't already own it.
    MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
    node = new NestedThreadsList(get_java_thread_list());
    // We insert at the front of the list to match up with the delete
    // in release_stable_list().
    node->set_next(self->get_nested_threads_hazard_ptr());
    self->set_nested_threads_hazard_ptr(node);
    if (EnableThreadSMRStatistics) {
      self->inc_nested_threads_hazard_ptr_cnt();
      // Track the deepest nesting seen; guarded by Threads_lock here.
      if (self->nested_threads_hazard_ptr_cnt() > _nested_thread_list_max) {
        _nested_thread_list_max = self->nested_threads_hazard_ptr_cnt();
      }
    }
  }
  log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::acquire_stable_list: add NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));

  return node->t_list();
}
653
654 void ThreadsSMRSupport::add_thread(JavaThread *thread){
655 ThreadsList *new_list = ThreadsList::add_thread(ThreadsSMRSupport::get_java_thread_list(), thread);
656 if (EnableThreadSMRStatistics) {
657 ThreadsSMRSupport::inc_java_thread_list_alloc_cnt();
658 ThreadsSMRSupport::update_java_thread_list_max(new_list->length());
659 }
660 // Initial _java_thread_list will not generate a "Threads::add" mesg.
661 log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::add: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
662
663 ThreadsList *old_list = ThreadsSMRSupport::xchg_java_thread_list(new_list);
664 ThreadsSMRSupport::free_list(old_list);
665 }
666
// set_delete_notify() and clear_delete_notify() are called
// under the protection of the delete_lock, but we also use an
// Atomic operation to ensure the memory update is seen earlier than
// when the delete_lock is dropped.
//
// Decrements the pending-notify count; pairs with set_delete_notify().
void ThreadsSMRSupport::clear_delete_notify() {
  Atomic::dec(&_delete_notify);
}
675
// Returns true when _delete_notify is non-zero, i.e. at least one exiting
// thread has announced (via set_delete_notify()) that it may be waiting
// in smr_delete().
bool ThreadsSMRSupport::delete_notify() {
  // Use load_acquire() in order to see any updates to _delete_notify
  // earlier than when delete_lock is grabbed.
  return (OrderAccess::load_acquire(&_delete_notify) != 0);
}
681
// Safely free a ThreadsList after a Threads::add() or Threads::remove().
// The specified ThreadsList may not get deleted during this call if it
// is still in-use (referenced by a hazard ptr). Other ThreadsLists
// in the chain may get deleted by this call if they are no longer in-use.
void ThreadsSMRSupport::free_list(ThreadsList* threads) {
  assert_locked_or_safepoint(Threads_lock);

  // Prepend 'threads' to the to-delete list; it stays there until a
  // hazard ptr scan proves nothing references it.
  threads->set_next_list(_to_delete_list);
  _to_delete_list = threads;
  if (EnableThreadSMRStatistics) {
    _to_delete_list_cnt++;
    if (_to_delete_list_cnt > _to_delete_list_max) {
      _to_delete_list_max = _to_delete_list_cnt;
    }
  }

  // Hash table size should be first power of two higher than twice the length of the ThreadsList
  int hash_table_size = MIN2((int)get_java_thread_list()->length(), 32) << 1;
  // Round up to the next power of two by smearing the high bit down and
  // adding one; capped at 64 by the MIN2 above.
  hash_table_size--;
  hash_table_size |= hash_table_size >> 1;
  hash_table_size |= hash_table_size >> 2;
  hash_table_size |= hash_table_size >> 4;
  hash_table_size |= hash_table_size >> 8;
  hash_table_size |= hash_table_size >> 16;
  hash_table_size++;

  // Gather a hash table of the current hazard ptrs:
  ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
  ScanHazardPtrGatherThreadsListClosure scan_cl(scan_table);
  Threads::threads_do(&scan_cl);

  // Walk through the linked list of pending freeable ThreadsLists
  // and free the ones that are not referenced from hazard ptrs.
  ThreadsList* current = _to_delete_list;
  ThreadsList* prev = NULL;
  ThreadsList* next = NULL;
  bool threads_is_freed = false;
  while (current != NULL) {
    next = current->next_list();
    if (!scan_table->has_entry((void*)current)) {
      // This ThreadsList is not referenced by a hazard ptr.
      // Unlink it from the to-delete list before deleting it.
      if (prev != NULL) {
        prev->set_next_list(next);
      }
      if (_to_delete_list == current) {
        _to_delete_list = next;
      }

      log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::free_list: threads=" INTPTR_FORMAT " is freed.", os::current_thread_id(), p2i(current));
      if (current == threads) threads_is_freed = true;
      delete current;
      if (EnableThreadSMRStatistics) {
        _java_thread_list_free_cnt++;
        _to_delete_list_cnt--;
      }
    } else {
      prev = current;
    }
    current = next;
  }

  if (!threads_is_freed) {
    // Only report "is not freed" on the original call to
    // free_list() for this ThreadsList.
    log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::free_list: threads=" INTPTR_FORMAT " is not freed.", os::current_thread_id(), p2i(threads));
  }

  delete scan_table;
}
751
// Return true if the specified JavaThread is protected by a hazard
// pointer (ThreadsList reference). Otherwise, returns false.
// Builds a fresh hash table of all JavaThreads reachable from hazard
// ptrs and checks membership; O(#threads) per call.
//
bool ThreadsSMRSupport::is_a_protected_JavaThread(JavaThread *thread) {
  assert_locked_or_safepoint(Threads_lock);

  // Hash table size should be first power of two higher than twice
  // the length of the Threads list.
  int hash_table_size = MIN2((int)get_java_thread_list()->length(), 32) << 1;
  // Round up to the next power of two by smearing the high bit down and
  // adding one; capped at 64 by the MIN2 above.
  hash_table_size--;
  hash_table_size |= hash_table_size >> 1;
  hash_table_size |= hash_table_size >> 2;
  hash_table_size |= hash_table_size >> 4;
  hash_table_size |= hash_table_size >> 8;
  hash_table_size |= hash_table_size >> 16;
  hash_table_size++;

  // Gather a hash table of the JavaThreads indirectly referenced by
  // hazard ptrs.
  ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
  ScanHazardPtrGatherProtectedThreadsClosure scan_cl(scan_table);
  Threads::threads_do(&scan_cl);

  bool thread_is_protected = false;
  if (scan_table->has_entry((void*)thread)) {
    thread_is_protected = true;
  }
  delete scan_table;
  return thread_is_protected;
795 }
796
797 // The nested case is rare.
798 release_stable_list_nested_path(self);
799 }
800
// Fast path way to release a stable ThreadsList. The release portion
// is lock-free, but the wake up portion is not.
//
void ThreadsSMRSupport::release_stable_list_fast_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() == NULL,
         "cannot have a nested hazard ptr when releasing a regular hazard ptr");

  // After releasing the hazard ptr, other threads may go ahead and
  // free up some memory temporarily used by a ThreadsList snapshot.
  self->set_threads_hazard_ptr(NULL);

  // We use double-check locking to reduce traffic on the system
  // wide Thread-SMR delete_lock: delete_notify() is checked lock-free
  // here and re-checked under delete_lock in the wake up path.
  if (ThreadsSMRSupport::delete_notify()) {
    // An exiting thread might be waiting in smr_delete(); we need to
    // check with delete_lock to be sure.
    release_stable_list_wake_up((char *) "regular hazard ptr");
  }
}
822
// Release a nested stable ThreadsList; this is rare so it uses
// Threads_lock. Pops the front NestedThreadsList node (matching the
// front-insert in acquire_stable_list_nested_path()) and deletes it.
//
void ThreadsSMRSupport::release_stable_list_nested_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL,
         "must have a regular hazard ptr to have nested hazard ptrs");

  // We have a nested ThreadsListHandle so we have to release it first.
  // The need for a nested ThreadsListHandle is rare so we do this while
  // holding the Threads_lock so we don't race with the scanning code;
  // the code is so much simpler this way.

  NestedThreadsList *node;
  {
    // Only grab the Threads_lock if we don't already own it.
    MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
    // We remove from the front of the list to match up with the insert
    // in acquire_stable_list().
    node = self->get_nested_threads_hazard_ptr();
    self->set_nested_threads_hazard_ptr(node->next());
    if (EnableThreadSMRStatistics) {
      self->dec_nested_threads_hazard_ptr_cnt();
    }
  }

  // An exiting thread might be waiting in smr_delete(); we need to
  // check with delete_lock to be sure.
  release_stable_list_wake_up((char *) "nested hazard ptr");

  log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list: delete NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));

  delete node;
}
858
// Wake up portion of the release stable ThreadsList protocol;
// uses the delete_lock(). 'log_str' only labels the debug log message.
// NOTE(review): the parameter is 'char *' rather than 'const char *',
// which forces callers to cast away const from string literals; consider
// changing the type in the header declaration.
//
void ThreadsSMRSupport::release_stable_list_wake_up(char *log_str) {
  assert(log_str != NULL, "sanity check");

  // Note: delete_lock is held in smr_delete() for the entire
  // hazard ptr search so that we do not lose this notify() if
  // the exiting thread has to wait. That code path also holds
  // Threads_lock (which was grabbed before delete_lock) so that
  // threads_do() can be called. This means the system can't start a
  // safepoint which means this thread can't take too long to get to
  // a safepoint because of being blocked on delete_lock.
  //
  MonitorLockerEx ml(ThreadsSMRSupport::delete_lock(), Monitor::_no_safepoint_check_flag);
  if (ThreadsSMRSupport::delete_notify()) {
    // Notify any exiting JavaThreads that are waiting in smr_delete()
    // that we've released a ThreadsList.
    ml.notify_all();
    log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list notified %s", os::current_thread_id(), log_str);
  }
}
881
882 void ThreadsSMRSupport::remove_thread(JavaThread *thread) {
883 ThreadsList *new_list = ThreadsList::remove_thread(ThreadsSMRSupport::get_java_thread_list(), thread);
884 if (EnableThreadSMRStatistics) {
885 ThreadsSMRSupport::inc_java_thread_list_alloc_cnt();
886 // This list is smaller so no need to check for a "longest" update.
887 }
888
889 // Final _java_thread_list will not generate a "Threads::remove" mesg.
890 log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::remove: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
891
892 ThreadsList *old_list = ThreadsSMRSupport::xchg_java_thread_list(new_list);
893 ThreadsSMRSupport::free_list(old_list);
894 }
895
// See note for clear_delete_notify().
//
// Announce that an exiting thread is about to scan for hazard ptrs in
// smr_delete(); observed by release_stable_list() via delete_notify().
void ThreadsSMRSupport::set_delete_notify() {
  Atomic::inc(&_delete_notify);
}
901
// Safely delete a JavaThread when it is no longer in use by a
// ThreadsListHandle. Loops scanning all hazard ptrs; each failed scan
// waits on delete_lock for a release_stable_list() notify, then retries.
//
void ThreadsSMRSupport::smr_delete(JavaThread *thread) {
  assert(!Threads_lock->owned_by_self(), "sanity");

  bool has_logged_once = false;
  elapsedTimer timer;
  if (EnableThreadSMRStatistics) {
    timer.start();
  }

  while (true) {
    {
      // No safepoint check because this JavaThread is not on the
      // Threads list.
      MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
      // Cannot use a MonitorLockerEx helper here because we have
      // to drop the Threads_lock first if we wait.
      ThreadsSMRSupport::delete_lock()->lock_without_safepoint_check();
      // Set the delete_notify flag after we grab delete_lock
      // and before we scan hazard ptrs because we're doing
      // double-check locking in release_stable_list().
      ThreadsSMRSupport::set_delete_notify();

      if (!is_a_protected_JavaThread(thread)) {
        // This is the common case.
        ThreadsSMRSupport::clear_delete_notify();
        ThreadsSMRSupport::delete_lock()->unlock();
        break;
      }
      if (!has_logged_once) {
        has_logged_once = true;
        log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread=" INTPTR_FORMAT " is not deleted.", os::current_thread_id(), p2i(thread));
        if (log_is_enabled(Debug, os, thread)) {
          // Expensive diagnostic: report which thread(s) hold a hazard
          // ptr that protects 'thread'.
          ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread);
          Threads::threads_do(&scan_cl);
        }
      }
    } // We have to drop the Threads_lock to wait or delete the thread

    if (EnableThreadSMRStatistics) {
      _delete_lock_wait_cnt++;
      if (_delete_lock_wait_cnt > _delete_lock_wait_max) {
        _delete_lock_wait_max = _delete_lock_wait_cnt;
      }
    }
    // Wait for a release_stable_list() call before we check again. No
    // safepoint check, no timeout, and not as suspend equivalent flag
    // because this JavaThread is not on the Threads list.
    ThreadsSMRSupport::delete_lock()->wait(Mutex::_no_safepoint_check_flag, 0,
                                           !Mutex::_as_suspend_equivalent_flag);
    if (EnableThreadSMRStatistics) {
      _delete_lock_wait_cnt--;
    }

    ThreadsSMRSupport::clear_delete_notify();
    ThreadsSMRSupport::delete_lock()->unlock();
    // Retry the whole scenario.
  }

  if (ThreadLocalHandshakes) {
    // The thread is about to be deleted so cancel any handshake.
    thread->cancel_handshake();
  }

  delete thread;
  if (EnableThreadSMRStatistics) {
    timer.stop();
    uint millis = (uint)timer.milliseconds();
    ThreadsSMRSupport::inc_deleted_thread_cnt();
    ThreadsSMRSupport::add_deleted_thread_times(millis);
    ThreadsSMRSupport::update_deleted_thread_time_max(millis);
  }

  log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread=" INTPTR_FORMAT " is deleted.", os::current_thread_id(), p2i(thread));
}
979
980
981 // Debug, logging, and printing stuff at the end:
982
983 // Log Threads class SMR info.
984 void ThreadsSMRSupport::log_statistics() {
985 LogTarget(Info, thread, smr) log;
986 if (log.is_enabled()) {
987 LogStream out(log);
988 print_info_on(&out);
989 }
990 }
991
// Print Threads class SMR info: the current ThreadsList, any pending
// to-delete ThreadsLists, and (when -XX:+EnableThreadSMRStatistics)
// the statistics counters.
void ThreadsSMRSupport::print_info_on(outputStream* st) {
  // Only grab the Threads_lock if we don't already own it
  // and if we are not reporting an error.
  MutexLockerEx ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);

  st->print_cr("Threads class SMR info:");
  st->print_cr("_java_thread_list=" INTPTR_FORMAT ", length=%u, "
               "elements={", p2i(_java_thread_list),
               _java_thread_list->length());
  print_info_elements_on(st, _java_thread_list);
  st->print_cr("}");
  if (_to_delete_list != NULL) {
    // Dump the whole chain of ThreadsLists that are pending deletion.
    st->print_cr("_to_delete_list=" INTPTR_FORMAT ", length=%u, "
                 "elements={", p2i(_to_delete_list),
                 _to_delete_list->length());
    print_info_elements_on(st, _to_delete_list);
    st->print_cr("}");
    for (ThreadsList *t_list = _to_delete_list->next_list();
         t_list != NULL; t_list = t_list->next_list()) {
      st->print("next-> " INTPTR_FORMAT ", length=%u, "
                "elements={", p2i(t_list), t_list->length());
      print_info_elements_on(st, t_list);
      st->print_cr("}");
    }
  }
  if (!EnableThreadSMRStatistics) {
    return;
  }
  st->print_cr("_java_thread_list_alloc_cnt=" UINT64_FORMAT ","
               "_java_thread_list_free_cnt=" UINT64_FORMAT ","
               "_java_thread_list_max=%u, "
               "_nested_thread_list_max=%u",
               _java_thread_list_alloc_cnt,
               _java_thread_list_free_cnt,
               _java_thread_list_max,
               _nested_thread_list_max);
  if (_tlh_cnt > 0) {
    // Guard avoids division by zero when computing the average.
    st->print_cr("_tlh_cnt=%u"
                 ", _tlh_times=%u"
                 ", avg_tlh_time=%0.2f"
                 ", _tlh_time_max=%u",
                 _tlh_cnt, _tlh_times,
                 ((double) _tlh_times / _tlh_cnt),
                 _tlh_time_max);
  }
  if (_deleted_thread_cnt > 0) {
    // Guard avoids division by zero when computing the average.
    st->print_cr("_deleted_thread_cnt=%u"
                 ", _deleted_thread_times=%u"
                 ", avg_deleted_thread_time=%0.2f"
                 ", _deleted_thread_time_max=%u",
                 _deleted_thread_cnt, _deleted_thread_times,
                 ((double) _deleted_thread_times / _deleted_thread_cnt),
                 _deleted_thread_time_max);
  }
  st->print_cr("_delete_lock_wait_cnt=%u, _delete_lock_wait_max=%u",
               _delete_lock_wait_cnt, _delete_lock_wait_max);
  st->print_cr("_to_delete_list_cnt=%u, _to_delete_list_max=%u",
               _to_delete_list_cnt, _to_delete_list_max);
}
1052
// Print ThreadsList elements (4 per line).
// Walks 't_list' and prints each JavaThread pointer, wrapping the output
// every four elements so a full row fits in an 80 column line.
void ThreadsSMRSupport::print_info_elements_on(outputStream* st, ThreadsList* t_list) {
  uint cnt = 0;
  JavaThreadIterator jti(t_list);
  for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
    st->print(INTPTR_FORMAT, p2i(jt));
    // NOTE(review): length() is unsigned; the loop body only runs when
    // length >= 1 so 'length() - 1' cannot underflow here.
    if (cnt < t_list->length() - 1) {
      // Separate with comma or comma-space except for the last one.
      if (((cnt + 1) % 4) == 0) {
        // Four INTPTR_FORMAT fit on an 80 column line so end the
        // current line with just a comma.
        st->print_cr(",");
      } else {
        // Not the last one on the current line so use comma-space:
        st->print(", ");
      }
    } else {
      // Last one so just end the current line.
      st->cr();
    }
    cnt++;
  }
|