90 #include "runtime/timer.hpp"
91 #include "runtime/timerTrace.hpp"
92 #include "runtime/vframe.hpp"
93 #include "runtime/vframeArray.hpp"
94 #include "runtime/vframe_hp.hpp"
95 #include "runtime/vmThread.hpp"
96 #include "runtime/vm_operations.hpp"
97 #include "runtime/vm_version.hpp"
98 #include "services/attachListener.hpp"
99 #include "services/management.hpp"
100 #include "services/memTracker.hpp"
101 #include "services/threadService.hpp"
102 #include "trace/traceMacros.hpp"
103 #include "trace/tracing.hpp"
104 #include "utilities/align.hpp"
105 #include "utilities/defaultStream.hpp"
106 #include "utilities/dtrace.hpp"
107 #include "utilities/events.hpp"
108 #include "utilities/macros.hpp"
109 #include "utilities/preserveException.hpp"
110 #include "utilities/resourceHash.hpp"
111 #include "utilities/vmError.hpp"
112 #if INCLUDE_ALL_GCS
113 #include "gc/cms/concurrentMarkSweepThread.hpp"
114 #include "gc/g1/concurrentMarkThread.inline.hpp"
115 #include "gc/parallel/pcTasks.hpp"
116 #endif // INCLUDE_ALL_GCS
117 #if INCLUDE_JVMCI
118 #include "jvmci/jvmciCompiler.hpp"
119 #include "jvmci/jvmciRuntime.hpp"
120 #include "logging/logHandle.hpp"
121 #endif
122 #ifdef COMPILER1
123 #include "c1/c1_Compiler.hpp"
124 #endif
125 #ifdef COMPILER2
126 #include "opto/c2compiler.hpp"
127 #include "opto/idealGraphPrinter.hpp"
128 #endif
129 #if INCLUDE_RTM_OPT
130 #include "runtime/rtmLocking.hpp"
190 p2i(aligned_addr));
191 }
192 ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
193 return aligned_addr;
194 } else {
195 return throw_excpt? AllocateHeap(size, flags, CURRENT_PC)
196 : AllocateHeap(size, flags, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
197 }
198 }
199
200 void Thread::operator delete(void* p) {
201 if (UseBiasedLocking) {
202 FreeHeap(((Thread*) p)->_real_malloc_address);
203 } else {
204 FreeHeap(p);
205 }
206 }
207
208 void JavaThread::smr_delete() {
209 if (_on_thread_list) {
210 Threads::smr_delete(this);
211 } else {
212 delete this;
213 }
214 }
215
216 // Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread,
217 // JavaThread
218
219
220 Thread::Thread() {
221 // stack and get_thread
222 set_stack_base(NULL);
223 set_stack_size(0);
224 set_self_raw_id(0);
225 set_lgrp_id(-1);
226 DEBUG_ONLY(clear_suspendible_thread();)
227
228 // allocated data structures
229 set_osthread(NULL);
230 set_resource_area(new (mtThread)ResourceArea());
402 clear_thread_current();
403 }
404
405 CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
406 }
407
// NOTE: dummy implementation for assertion purposes only. Every concrete
// thread type is expected to override run(); reaching this base version
// indicates a bug.
void Thread::run() {
  ShouldNotReachHere();
}
412
#ifdef ASSERT
// A JavaThread is considered "dangling" if it is not the current
// thread, has been added to the Threads list, the system is not at a
// safepoint and the Thread is not "protected" (i.e., not covered by a
// hazard ptr / ThreadsList reference).
//
void Thread::check_for_dangling_thread_pointer(Thread *thread) {
  // Any one of these conditions makes the pointer safe to use:
  // not a JavaThread, is the current thread, never published on the
  // Threads list, VM is at a safepoint, or protected by a hazard ptr.
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         !((JavaThread *) thread)->on_thread_list() ||
         SafepointSynchronize::is_at_safepoint() ||
         Threads::is_a_protected_JavaThread_with_lock((JavaThread *) thread),
         "possibility of dangling Thread pointer");
}
#endif
426
427 ThreadPriority Thread::get_priority(const Thread* const thread) {
428 ThreadPriority priority;
429 // Can return an error!
430 (void)os::get_priority(thread, priority);
431 assert(MinPriority <= priority && priority <= MaxPriority, "non-Java priority found");
432 return priority;
433 }
434
// Set the OS-level priority of the given thread. In debug builds, first
// verify the Thread pointer is safe to dereference.
void Thread::set_priority(Thread* thread, ThreadPriority priority) {
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Can return an error! The status is deliberately ignored.
  (void)os::set_priority(thread, priority);
}
440
441
442 void Thread::start(Thread* thread) {
3432
3433 // ======= Threads ========
3434
3435 // The Threads class links together all active threads, and provides
3436 // operations over all threads. It is protected by the Threads_lock,
3437 // which is also used in other global contexts like safepointing.
3438 // ThreadsListHandles are used to safely perform operations on one
3439 // or more threads without the risk of the thread exiting during the
3440 // operation.
3441 //
3442 // Note: The Threads_lock is currently more widely used than we
3443 // would like. We are actively migrating Threads_lock uses to other
3444 // mechanisms in order to reduce Threads_lock contention.
3445
JavaThread* Threads::_thread_list = NULL;
int         Threads::_number_of_threads = 0;
int         Threads::_number_of_non_daemon_threads = 0;
int         Threads::_return_code = 0;
int         Threads::_thread_claim_parity = 0;
size_t      JavaThread::_stack_size_at_create = 0;
// Safe Memory Reclamation (SMR) support:
// Monitor guarding the exiting-thread handshake in Threads::smr_delete().
Monitor*    Threads::_smr_delete_lock =
                new Monitor(Monitor::special, "smr_delete_lock",
                            false /* allow_vm_block */,
                            Monitor::_safepoint_check_never);
// The '_cnt', '_max' and '_times' fields are enabled via
// -XX:+EnableThreadSMRStatistics:

// # of parallel threads in _smr_delete_lock->wait().
// Impl note: Hard to imagine > 64K waiting threads so this could be 16-bit,
// but there is no nice 16-bit _FORMAT support.
uint        Threads::_smr_delete_lock_wait_cnt = 0;

// Max # of parallel threads in _smr_delete_lock->wait().
// Impl note: See _smr_delete_lock_wait_cnt note.
uint        Threads::_smr_delete_lock_wait_max = 0;

// Flag to indicate when an _smr_delete_lock->notify() is needed.
// Impl note: See _smr_delete_lock_wait_cnt note.
volatile uint Threads::_smr_delete_notify = 0;

// # of threads deleted over VM lifetime.
// Impl note: Atomically incremented over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint Threads::_smr_deleted_thread_cnt = 0;

// Max time in millis to delete a thread.
// Impl note: 16-bit might be too small on an overloaded machine. Use
// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
// loop for correctness.
volatile uint Threads::_smr_deleted_thread_time_max = 0;

// Cumulative time in millis to delete threads.
// Impl note: Atomically added to over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint Threads::_smr_deleted_thread_times = 0;

// The current "stable" list of JavaThreads; starts out empty.
ThreadsList* volatile Threads::_smr_java_thread_list = new ThreadsList(0);

// # of ThreadsLists allocated over VM lifetime.
// Impl note: We allocate a new ThreadsList for every thread create and
// every thread delete so we need a bigger type than the
// _smr_deleted_thread_cnt field.
uint64_t    Threads::_smr_java_thread_list_alloc_cnt = 1;

// # of ThreadsLists freed over VM lifetime.
// Impl note: See _smr_java_thread_list_alloc_cnt note.
uint64_t    Threads::_smr_java_thread_list_free_cnt = 0;

// Max size ThreadsList allocated.
// Impl note: Max # of threads alive at one time should fit in unsigned 32-bit.
uint        Threads::_smr_java_thread_list_max = 0;

// Max # of nested ThreadsLists for a thread.
// Impl note: Hard to imagine > 64K nested ThreadsLists so this could be
// 16-bit, but there is no nice 16-bit _FORMAT support.
uint        Threads::_smr_nested_thread_list_max = 0;

// # of ThreadsListHandles deleted over VM lifetime.
// Impl note: Atomically incremented over VM lifetime so use unsigned for
// more range. There will be fewer ThreadsListHandles than threads so
// unsigned 32-bit should be fine.
volatile uint Threads::_smr_tlh_cnt = 0;

// Max time in millis to delete a ThreadsListHandle.
// Impl note: 16-bit might be too small on an overloaded machine. Use
// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
// loop for correctness.
volatile uint Threads::_smr_tlh_time_max = 0;

// Cumulative time in millis to delete ThreadsListHandles.
// Impl note: Atomically added to over VM lifetime so use unsigned for more
// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
// isn't available everywhere (or is it?).
volatile uint Threads::_smr_tlh_times = 0;

// ThreadsLists that have been retired but not yet freed (still protected
// by some hazard ptr).
ThreadsList* Threads::_smr_to_delete_list = NULL;

// # of parallel ThreadsLists on the to-delete list.
// Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so
// this could be 16-bit, but there is no nice 16-bit _FORMAT support.
uint        Threads::_smr_to_delete_list_cnt = 0;

// Max # of parallel ThreadsLists on the to-delete list.
// Impl note: See _smr_to_delete_list_cnt note.
uint        Threads::_smr_to_delete_list_max = 0;

#ifdef ASSERT
bool        Threads::_vm_complete = false;
#endif
3544
// Load *addr while issuing a prefetch 'prefetch_interval' bytes ahead so
// that the next iteration's load in a linear scan is likely already cached.
static inline void *prefetch_and_load_ptr(void **addr, intx prefetch_interval) {
  Prefetch::read((void*)addr, prefetch_interval);
  return *addr;
}
3549
// Possibly the ugliest for loop the world has seen. C++ does not allow
// multiple types in the declaration section of the for loop. In this case
// we are only dealing with pointers and hence can cast them. It looks ugly
// but macros are ugly and therefore it's fine to make things absurdly ugly.
//
// Iterates X over every JavaThread* in LIST (a ThreadsList*), prefetching
// PrefetchScanIntervalInBytes ahead of the current slot on each step.
#define DO_JAVA_THREADS(LIST, X)                                                                                          \
    for (JavaThread *MACRO_scan_interval = (JavaThread*)(uintptr_t)PrefetchScanIntervalInBytes,                           \
             *MACRO_list = (JavaThread*)(LIST),                                                                           \
             **MACRO_end = ((JavaThread**)((ThreadsList*)MACRO_list)->threads()) + ((ThreadsList*)MACRO_list)->length(),  \
             **MACRO_current_p = (JavaThread**)((ThreadsList*)MACRO_list)->threads(),                                     \
             *X = (JavaThread*)prefetch_and_load_ptr((void**)MACRO_current_p, (intx)MACRO_scan_interval);                  \
         MACRO_current_p != MACRO_end;                                                                                    \
         MACRO_current_p++,                                                                                               \
             X = (JavaThread*)prefetch_and_load_ptr((void**)MACRO_current_p, (intx)MACRO_scan_interval))
3563
// Return the current "stable" ThreadsList with acquire semantics so that
// reads of the list's contents cannot be reordered before the load of the
// list pointer itself.
inline ThreadsList* Threads::get_smr_java_thread_list() {
  return (ThreadsList*)OrderAccess::load_acquire(&_smr_java_thread_list);
}
3567
// All JavaThreads (iterates X over the current stable ThreadsList)
#define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(get_smr_java_thread_list(), X)
3570
3571 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3572 void Threads::threads_do(ThreadClosure* tc) {
3573 assert_locked_or_safepoint(Threads_lock);
3574 // ALL_JAVA_THREADS iterates through all JavaThreads
3575 ALL_JAVA_THREADS(p) {
3576 tc->do_thread(p);
3577 }
3578 // Someday we could have a table or list of all non-JavaThreads.
3579 // For now, just manually iterate through them.
3580 tc->do_thread(VMThread::vm_thread());
3581 Universe::heap()->gc_threads_do(tc);
3582 WatcherThread *wt = WatcherThread::watcher_thread();
3583 // Strictly speaking, the following NULL check isn't sufficient to make sure
3584 // the data for WatcherThread is still valid upon being examined. However,
3585 // considering that WatchThread terminates when the VM is on the way to
3586 // exit at safepoint, the chance of the above is extremely small. The right
3587 // way to prevent termination of WatcherThread would be to acquire
3588 // Terminator_lock, but we can't do that without violating the lock rank
3589 // checking in some cases.
3650 if (result.get_jint() != JNI_OK) {
3651 vm_exit_during_initialization(); // no message or exception
3652 }
3653
3654 universe_post_module_init();
3655 }
3656
// Phase 3. final setup - set security manager, system class loader and TCCL
//
// This will instantiate and set the security manager, set the system class
// loader as well as the thread context class loader. The security manager
// and system class loader may be a custom class loaded from -Xbootclasspath/a,
// other modules or the application's classpath.
static void call_initPhase3(TRAPS) {
  // Resolve java.lang.System and invoke its static void initPhase3() method;
  // any pending exception propagates to the caller via CHECK.
  Klass* klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
  JavaValue result(T_VOID);
  JavaCalls::call_static(&result, klass, vmSymbols::initPhase3_name(),
                                         vmSymbols::void_method_signature(), CHECK);
}
3669
3670 // Safe Memory Reclamation (SMR) support:
3671 //
3672
// Acquire a stable ThreadsList.
//
// Dispatches to the lock-free fast path when the calling thread has no
// hazard ptr yet, or to the Threads_lock-based nested path when it does.
ThreadsList *Threads::acquire_stable_list(Thread *self, bool is_ThreadsListSetter) {
  assert(self != NULL, "sanity check");
  // acquire_stable_list_nested_path() will grab the Threads_lock
  // so let's make sure the ThreadsListHandle is in a safe place.
  // ThreadsListSetter cannot make this check on this code path.
  debug_only(if (!is_ThreadsListSetter && StrictSafepointChecks) self->check_for_valid_safepoint_state(/* potential_vm_operation */ false);)

  if (self->get_threads_hazard_ptr() == NULL) {
    // The typical case is first.
    return acquire_stable_list_fast_path(self);
  }

  // The nested case is rare.
  return acquire_stable_list_nested_path(self);
}
3690
// Fast path (and lock free) way to acquire a stable ThreadsList.
//
// Publishes a tagged (unverified) hazard ptr, re-checks that the list has
// not changed, then removes the tag to verify the hazard ptr as stable.
// Retries on any race with Threads::add()/remove() or with a scanning
// thread that invalidates the tagged ptr.
ThreadsList *Threads::acquire_stable_list_fast_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() == NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() == NULL,
         "cannot have a nested hazard ptr with a NULL regular hazard ptr");

  ThreadsList* threads;

  // Stable recording of a hazard ptr for SMR. This code does not use
  // locks so its use of the _smr_java_thread_list & _threads_hazard_ptr
  // fields is racy relative to code that uses those fields with locks.
  // OrderAccess and Atomic functions are used to deal with those races.
  //
  while (true) {
    threads = get_smr_java_thread_list();

    // Publish a tagged hazard ptr to denote that the hazard ptr is not
    // yet verified as being stable. Due to the fence after the hazard
    // ptr write, it will be sequentially consistent w.r.t. the
    // sequentially consistent writes of the ThreadsList, even on
    // non-multiple copy atomic machines where stores can be observed
    // in different order from different observer threads.
    ThreadsList* unverified_threads = Thread::tag_hazard_ptr(threads);
    self->set_threads_hazard_ptr(unverified_threads);

    // If _smr_java_thread_list has changed, we have lost a race with
    // Threads::add() or Threads::remove() and have to try again.
    if (get_smr_java_thread_list() != threads) {
      continue;
    }

    // We try to remove the tag which will verify the hazard ptr as
    // being stable. This exchange can race with a scanning thread
    // which might invalidate the tagged hazard ptr to keep it from
    // being followed to access JavaThread ptrs. If we lose the race,
    // we simply retry. If we win the race, then the stable hazard
    // ptr is officially published.
    if (self->cmpxchg_threads_hazard_ptr(threads, unverified_threads) == unverified_threads) {
      break;
    }
  }

  // A stable hazard ptr has been published letting other threads know
  // that the ThreadsList and the JavaThreads reachable from this list
  // are protected and hence they should not be deleted until everyone
  // agrees it is safe to do so.

  return threads;
}
3742
// Acquire a nested stable ThreadsList; this is rare so it uses
// Threads_lock.
//
// Returns the ThreadsList recorded in the newly created NestedThreadsList
// node, which is pushed onto the front of the caller's nested-hazard-ptr
// chain (popped again by release_stable_list_nested_path()).
ThreadsList *Threads::acquire_stable_list_nested_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL,
         "cannot have a NULL regular hazard ptr when acquiring a nested hazard ptr");

  // The thread already has a hazard ptr (ThreadsList ref) so we need
  // to create a nested ThreadsListHandle with the current ThreadsList
  // since it might be different than our current hazard ptr. The need
  // for a nested ThreadsListHandle is rare so we do this while holding
  // the Threads_lock so we don't race with the scanning code; the code
  // is so much simpler this way.

  NestedThreadsList* node;
  {
    // Only grab the Threads_lock if we don't already own it.
    MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
    node = new NestedThreadsList(get_smr_java_thread_list());
    // We insert at the front of the list to match up with the delete
    // in release_stable_list().
    node->set_next(self->get_nested_threads_hazard_ptr());
    self->set_nested_threads_hazard_ptr(node);
    if (EnableThreadSMRStatistics) {
      self->inc_nested_threads_hazard_ptr_cnt();
      if (self->nested_threads_hazard_ptr_cnt() > _smr_nested_thread_list_max) {
        _smr_nested_thread_list_max = self->nested_threads_hazard_ptr_cnt();
      }
    }
  }
  log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::acquire_stable_list: add NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));

  return node->t_list();
}
3778
// Atomically add 'add_value' millis to the cumulative thread-delete time.
inline void Threads::add_smr_deleted_thread_times(uint add_value) {
  Atomic::add(add_value, &_smr_deleted_thread_times);
}
3782
// Atomically bump the count of threads deleted over the VM's lifetime.
inline void Threads::inc_smr_deleted_thread_cnt() {
  Atomic::inc(&_smr_deleted_thread_cnt);
}
3786
// Release a stable ThreadsList.
//
// Dispatches to the lock-free fast path when the calling thread has no
// nested hazard ptr, or to the Threads_lock-based nested path when it does.
void Threads::release_stable_list(Thread *self) {
  assert(self != NULL, "sanity check");
  // release_stable_list_nested_path() will grab the Threads_lock
  // so let's make sure the ThreadsListHandle is in a safe place.
  debug_only(if (StrictSafepointChecks) self->check_for_valid_safepoint_state(/* potential_vm_operation */ false);)

  if (self->get_nested_threads_hazard_ptr() == NULL) {
    // The typical case is first.
    release_stable_list_fast_path(self);
    return;
  }

  // The nested case is rare.
  release_stable_list_nested_path(self);
}
3804
// Fast path way to release a stable ThreadsList. The release portion
// is lock-free, but the wake up portion is not.
//
void Threads::release_stable_list_fast_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() == NULL,
         "cannot have a nested hazard ptr when releasing a regular hazard ptr");

  // After releasing the hazard ptr, other threads may go ahead and
  // free up some memory temporarily used by a ThreadsList snapshot.
  self->set_threads_hazard_ptr(NULL);

  // We use double-check locking to reduce traffic on the system
  // wide smr_delete_lock: smr_delete_notify() is checked here without
  // the lock and again under the lock in release_stable_list_wake_up().
  if (Threads::smr_delete_notify()) {
    // An exiting thread might be waiting in smr_delete(); we need to
    // check with smr_delete_lock to be sure.
    release_stable_list_wake_up((char *) "regular hazard ptr");
  }
}
3826
// Release a nested stable ThreadsList; this is rare so it uses
// Threads_lock.
//
void Threads::release_stable_list_nested_path(Thread *self) {
  assert(self != NULL, "sanity check");
  assert(self->get_nested_threads_hazard_ptr() != NULL, "sanity check");
  assert(self->get_threads_hazard_ptr() != NULL,
         "must have a regular hazard ptr to have nested hazard ptrs");

  // We have a nested ThreadsListHandle so we have to release it first.
  // The need for a nested ThreadsListHandle is rare so we do this while
  // holding the Threads_lock so we don't race with the scanning code;
  // the code is so much simpler this way.

  NestedThreadsList *node;
  {
    // Only grab the Threads_lock if we don't already own it.
    MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
    // We remove from the front of the list to match up with the insert
    // in acquire_stable_list().
    node = self->get_nested_threads_hazard_ptr();
    self->set_nested_threads_hazard_ptr(node->next());
    if (EnableThreadSMRStatistics) {
      self->dec_nested_threads_hazard_ptr_cnt();
    }
  }

  // An exiting thread might be waiting in smr_delete(); we need to
  // check with smr_delete_lock to be sure.
  release_stable_list_wake_up((char *) "nested hazard ptr");

  log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::release_stable_list: delete NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));

  delete node;
}
3862
// Wake up portion of the release stable ThreadsList protocol;
// uses the smr_delete_lock().
//
// 'log_str' describes which kind of hazard ptr was released and is only
// used for logging.
void Threads::release_stable_list_wake_up(char *log_str) {
  assert(log_str != NULL, "sanity check");

  // Note: smr_delete_lock is held in smr_delete() for the entire
  // hazard ptr search so that we do not lose this notify() if
  // the exiting thread has to wait. That code path also holds
  // Threads_lock (which was grabbed before smr_delete_lock) so that
  // threads_do() can be called. This means the system can't start a
  // safepoint which means this thread can't take too long to get to
  // a safepoint because of being blocked on smr_delete_lock.
  //
  MonitorLockerEx ml(Threads::smr_delete_lock(), Monitor::_no_safepoint_check_flag);
  if (Threads::smr_delete_notify()) {
    // Notify any exiting JavaThreads that are waiting in smr_delete()
    // that we've released a ThreadsList.
    ml.notify_all();
    log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::release_stable_list notified %s", os::current_thread_id(), log_str);
  }
}
3885
3886 inline void Threads::update_smr_deleted_thread_time_max(uint new_value) {
3887 while (true) {
3888 uint cur_value = _smr_deleted_thread_time_max;
3889 if (new_value <= cur_value) {
3890 // No need to update max value so we're done.
3891 break;
3892 }
3893 if (Atomic::cmpxchg(new_value, &_smr_deleted_thread_time_max, cur_value) == cur_value) {
3894 // Updated max value so we're done. Otherwise try it all again.
3895 break;
3896 }
3897 }
3898 }
3899
// Atomically install 'new_list' as the current stable ThreadsList and
// return the previous list (which the caller is responsible for retiring).
inline ThreadsList* Threads::xchg_smr_java_thread_list(ThreadsList* new_list) {
  return (ThreadsList*)Atomic::xchg(new_list, &_smr_java_thread_list);
}
3903
3904 void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
3905 TraceTime timer("Initialize java.lang classes", TRACETIME_LOG(Info, startuptime));
3906
3907 if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
3908 create_vm_init_libraries();
3909 }
3910
3911 initialize_class(vmSymbols::java_lang_String(), CHECK);
3912
3913 // Inject CompactStrings value after the static initializers for String ran.
3914 java_lang_String::set_compact_strings(CompactStrings);
3915
3916 // Initialize java_lang.System (needed before creating the thread)
3917 initialize_class(vmSymbols::java_lang_System(), CHECK);
3918 // The VM creates & returns objects of this class. Make sure it's initialized.
3919 initialize_class(vmSymbols::java_lang_Class(), CHECK);
3920 initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK);
3921 Handle thread_group = create_initial_thread_group(CHECK);
3922 Universe::set_main_thread_group(thread_group());
3923 initialize_class(vmSymbols::java_lang_Thread(), CHECK);
4649 return true;
4650 }
4651
4652
4653 jboolean Threads::is_supported_jni_version_including_1_1(jint version) {
4654 if (version == JNI_VERSION_1_1) return JNI_TRUE;
4655 return is_supported_jni_version(version);
4656 }
4657
4658
4659 jboolean Threads::is_supported_jni_version(jint version) {
4660 if (version == JNI_VERSION_1_2) return JNI_TRUE;
4661 if (version == JNI_VERSION_1_4) return JNI_TRUE;
4662 if (version == JNI_VERSION_1_6) return JNI_TRUE;
4663 if (version == JNI_VERSION_1_8) return JNI_TRUE;
4664 if (version == JNI_VERSION_9) return JNI_TRUE;
4665 if (version == JNI_VERSION_10) return JNI_TRUE;
4666 return JNI_FALSE;
4667 }
4668
// Hash table of pointers found by a scan. Used for collecting hazard
// pointers (ThreadsList references). Also used for collecting JavaThreads
// that are indirectly referenced by hazard ptrs. An instance of this
// class only contains one type of pointer.
//
class ThreadScanHashtable : public CHeapObj<mtThread> {
 private:
  // Identity comparison; entries are raw pointers.
  static bool ptr_equals(void * const& s1, void * const& s2) {
    return s1 == s2;
  }

  static unsigned int ptr_hash(void * const& s1) {
    // 2654435761 = 2^32 * Phi (golden ratio)
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

  int _table_size;
  // ResourceHashtable SIZE is specified at compile time so our
  // dynamic _table_size is unused for now; 1031 is the first prime
  // after 1024.
  typedef ResourceHashtable<void *, int, &ThreadScanHashtable::ptr_hash,
                            &ThreadScanHashtable::ptr_equals, 1031,
                            ResourceObj::C_HEAP, mtThread> PtrTable;
  PtrTable * _ptrs;

 public:
  // ResourceHashtable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ThreadScanHashtable(int table_size) : _table_size(table_size), _ptrs(new (ResourceObj::C_HEAP, mtThread) PtrTable()) {}

  ~ThreadScanHashtable() { delete _ptrs; }

  // Return true iff 'pointer' was previously recorded via add_entry().
  bool has_entry(void *pointer) {
    int *val_ptr = _ptrs->get(pointer);
    return val_ptr != NULL && *val_ptr == 1;
  }

  // Record 'pointer'; the mapped value is always 1 (set membership only).
  void add_entry(void *pointer) {
    _ptrs->put(pointer, 1);
  }
};
4711
// Closure to gather JavaThreads indirectly referenced by hazard ptrs
// (ThreadsList references) into a hash table. This closure handles part 2
// of the dance - adding all the JavaThreads referenced by the hazard
// pointer (ThreadsList reference) to the hash table.
//
class AddThreadHazardPointerThreadClosure : public ThreadClosure {
 private:
  ThreadScanHashtable *_table;

 public:
  AddThreadHazardPointerThreadClosure(ThreadScanHashtable *table) : _table(table) {}

  virtual void do_thread(Thread *thread) {
    if (!_table->has_entry((void*)thread)) {
      // The same JavaThread might be on more than one ThreadsList or
      // more than one thread might be using the same ThreadsList. In
      // either case, we only need a single entry for a JavaThread.
      _table->add_entry((void*)thread);
    }
  }
};
4733
// Closure to gather JavaThreads indirectly referenced by hazard ptrs
// (ThreadsList references) into a hash table. This closure handles part 1
// of the dance - hazard ptr chain walking and dispatch to another
// closure.
//
class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure {
 private:
  ThreadScanHashtable *_table;
 public:
  ScanHazardPtrGatherProtectedThreadsClosure(ThreadScanHashtable *table) : _table(table) {}

  virtual void do_thread(Thread *thread) {
    assert_locked_or_safepoint(Threads_lock);

    if (thread == NULL) return;

    // This code races with Threads::acquire_stable_list() which is
    // lock-free so we have to handle some special situations.
    //
    ThreadsList *current_list = NULL;
    while (true) {
      current_list = thread->get_threads_hazard_ptr();
      // No hazard ptr so nothing more to do.
      if (current_list == NULL) {
        assert(thread->get_nested_threads_hazard_ptr() == NULL,
               "cannot have a nested hazard ptr with a NULL regular hazard ptr");
        return;
      }

      // If the hazard ptr is verified as stable (since it is not tagged),
      // then it is safe to use.
      if (!Thread::is_hazard_ptr_tagged(current_list)) break;

      // The hazard ptr is tagged as not yet verified as being stable
      // so we are racing with acquire_stable_list(). This exchange
      // attempts to invalidate the hazard ptr. If we win the race,
      // then we can ignore this unstable hazard ptr and the other
      // thread will retry the attempt to publish a stable hazard ptr.
      // If we lose the race, then we retry our attempt to look at the
      // hazard ptr.
      if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return;
    }

    // The current JavaThread has a hazard ptr (ThreadsList reference)
    // which might be _smr_java_thread_list or it might be an older
    // ThreadsList that has been removed but not freed. In either case,
    // the hazard ptr is protecting all the JavaThreads on that
    // ThreadsList.
    AddThreadHazardPointerThreadClosure add_cl(_table);
    current_list->threads_do(&add_cl);

    // Any NestedThreadsLists are also protecting JavaThreads so
    // gather those also; the ThreadsLists may be different.
    for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
         node != NULL; node = node->next()) {
      node->t_list()->threads_do(&add_cl);
    }
  }
};
4793
// Closure to print JavaThreads that have a hazard ptr (ThreadsList
// reference) that contains an indirect reference to a specific JavaThread.
// Used for logging only; unstable (tagged) hazard ptrs are skipped rather
// than invalidated.
//
class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
 private:
  JavaThread *_thread;
 public:
  ScanHazardPtrPrintMatchingThreadsClosure(JavaThread *thread) : _thread(thread) {}

  virtual void do_thread(Thread *thread) {
    assert_locked_or_safepoint(Threads_lock);

    if (thread == NULL) return;
    ThreadsList *current_list = thread->get_threads_hazard_ptr();
    if (current_list == NULL) {
      assert(thread->get_nested_threads_hazard_ptr() == NULL,
             "cannot have a nested hazard ptr with a NULL regular hazard ptr");
      return;
    }
    // If the hazard ptr is unverified, then ignore it.
    if (Thread::is_hazard_ptr_tagged(current_list)) return;

    // The current JavaThread has a hazard ptr (ThreadsList reference)
    // which might be _smr_java_thread_list or it might be an older
    // ThreadsList that has been removed but not freed. In either case,
    // the hazard ptr is protecting all the JavaThreads on that
    // ThreadsList, but we only care about matching a specific JavaThread.
    DO_JAVA_THREADS(current_list, p) {
      if (p == _thread) {
        log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
        break;
      }
    }

    // Any NestedThreadsLists are also protecting JavaThreads so
    // check those also; the ThreadsLists may be different.
    for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
         node != NULL; node = node->next()) {
      DO_JAVA_THREADS(node->t_list(), p) {
        if (p == _thread) {
          log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread1=" INTPTR_FORMAT " has a nested hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
          return;
        }
      }
    }
  }
};
4841
// Return true if the specified JavaThread is protected by a hazard
// pointer (ThreadsList reference). Otherwise, returns false.
//
bool Threads::is_a_protected_JavaThread(JavaThread *thread) {
  assert_locked_or_safepoint(Threads_lock);

  // Hash table size should be first power of two higher than twice
  // the length of the Threads list. The capped value is doubled by
  // the shift; the bit-smearing sequence below then rounds it up to
  // the next power of two.
  int hash_table_size = MIN2(_number_of_threads, 32) << 1;
  hash_table_size--;
  hash_table_size |= hash_table_size >> 1;
  hash_table_size |= hash_table_size >> 2;
  hash_table_size |= hash_table_size >> 4;
  hash_table_size |= hash_table_size >> 8;
  hash_table_size |= hash_table_size >> 16;
  hash_table_size++;

  // Gather a hash table of the JavaThreads indirectly referenced by
  // hazard ptrs.
  ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
  ScanHazardPtrGatherProtectedThreadsClosure scan_cl(scan_table);
  Threads::threads_do(&scan_cl);

  bool thread_is_protected = false;
  if (scan_table->has_entry((void*)thread)) {
    thread_is_protected = true;
  }
  delete scan_table;
  return thread_is_protected;
}
4872
// Safely delete a JavaThread when it is no longer in use by a
// ThreadsListHandle.
//
void Threads::smr_delete(JavaThread *thread) {
  // The caller must not already hold the Threads_lock; it is acquired
  // and dropped repeatedly in the retry loop below.
  assert(!Threads_lock->owned_by_self(), "sanity");

  bool has_logged_once = false;
  elapsedTimer timer;
  if (EnableThreadSMRStatistics) {
    timer.start();
  }

  // Loop until no hazard ptr (ThreadsList reference) protects 'thread'.
  while (true) {
    {
      // No safepoint check because this JavaThread is not on the
      // Threads list.
      MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
      // Cannot use a MonitorLockerEx helper here because we have
      // to drop the Threads_lock first if we wait.
      Threads::smr_delete_lock()->lock_without_safepoint_check();
      // Set the smr_delete_notify flag after we grab smr_delete_lock
      // and before we scan hazard ptrs because we're doing
      // double-check locking in release_stable_list().
      Threads::set_smr_delete_notify();

      if (!is_a_protected_JavaThread(thread)) {
        // This is the common case.
        Threads::clear_smr_delete_notify();
        Threads::smr_delete_lock()->unlock();
        break;
      }
      if (!has_logged_once) {
        // Log the "stuck" state only on the first failed pass.
        has_logged_once = true;
        log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread=" INTPTR_FORMAT " is not deleted.", os::current_thread_id(), p2i(thread));
        if (log_is_enabled(Debug, os, thread)) {
          ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread);
          Threads::threads_do(&scan_cl);
        }
      }
    } // We have to drop the Threads_lock to wait or delete the thread

    if (EnableThreadSMRStatistics) {
      // Track the high-water mark of simultaneous waiters.
      _smr_delete_lock_wait_cnt++;
      if (_smr_delete_lock_wait_cnt > _smr_delete_lock_wait_max) {
        _smr_delete_lock_wait_max = _smr_delete_lock_wait_cnt;
      }
    }
    // Wait for a release_stable_list() call before we check again. No
    // safepoint check, no timeout, and not as suspend equivalent flag
    // because this JavaThread is not on the Threads list.
    Threads::smr_delete_lock()->wait(Mutex::_no_safepoint_check_flag, 0,
                                     !Mutex::_as_suspend_equivalent_flag);
    if (EnableThreadSMRStatistics) {
      _smr_delete_lock_wait_cnt--;
    }

    Threads::clear_smr_delete_notify();
    Threads::smr_delete_lock()->unlock();
    // Retry the whole scenario.
  }

  if (ThreadLocalHandshakes) {
    // The thread is about to be deleted so cancel any handshake.
    thread->cancel_handshake();
  }

  delete thread;
  if (EnableThreadSMRStatistics) {
    timer.stop();
    uint millis = (uint)timer.milliseconds();
    Threads::inc_smr_deleted_thread_cnt();
    Threads::add_smr_deleted_thread_times(millis);
    Threads::update_smr_deleted_thread_time_max(millis);
  }

  log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread=" INTPTR_FORMAT " is deleted.", os::current_thread_id(), p2i(thread));
}
4950
// Return true while some thread is between set_smr_delete_notify() and
// clear_smr_delete_notify(), i.e., an smr_delete() pass is in progress.
bool Threads::smr_delete_notify() {
  // Use load_acquire() in order to see any updates to _smr_delete_notify
  // earlier than when smr_delete_lock is grabbed.
  return (OrderAccess::load_acquire(&_smr_delete_notify) != 0);
}
4956
// set_smr_delete_notify() and clear_smr_delete_notify() are called
// under the protection of the smr_delete_lock, but we also use an
// Atomic operation to ensure the memory update is seen earlier than
// when the smr_delete_lock is dropped.
//
void Threads::set_smr_delete_notify() {
  // A counter (not a flag) so overlapping smr_delete() passes nest safely.
  Atomic::inc(&_smr_delete_notify);
}
4965
void Threads::clear_smr_delete_notify() {
  // Counterpart of set_smr_delete_notify(); called under smr_delete_lock.
  Atomic::dec(&_smr_delete_notify);
}
4969
// Closure to gather hazard ptrs (ThreadsList references) into a hash table.
// Note: unlike ScanHazardPtrGatherProtectedThreadsClosure, this closure
// records the ThreadsList pointers themselves, not the JavaThreads on them.
//
class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure {
 private:
  // Accumulates the set of ThreadsList pointers seen; not owned here.
  ThreadScanHashtable *_table;
 public:
  ScanHazardPtrGatherThreadsListClosure(ThreadScanHashtable *table) : _table(table) {}

  // Record 'thread's hazard ptr (and any nested hazard ptrs) in _table.
  virtual void do_thread(Thread* thread) {
    assert_locked_or_safepoint(Threads_lock);

    if (thread == NULL) return;
    ThreadsList *threads = thread->get_threads_hazard_ptr();
    if (threads == NULL) {
      assert(thread->get_nested_threads_hazard_ptr() == NULL,
             "cannot have a nested hazard ptr with a NULL regular hazard ptr");
      return;
    }
    // In this closure we always ignore the tag that might mark this
    // hazard ptr as not yet verified. If we happen to catch an
    // unverified hazard ptr that is subsequently discarded (not
    // published), then the only side effect is that we might keep a
    // to-be-deleted ThreadsList alive a little longer.
    threads = Thread::untag_hazard_ptr(threads);
    if (!_table->has_entry((void*)threads)) {
      _table->add_entry((void*)threads);
    }

    // Any NestedThreadsLists are also protecting JavaThreads so
    // gather those also; the ThreadsLists may be different.
    for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
         node != NULL; node = node->next()) {
      threads = node->t_list();
      if (!_table->has_entry((void*)threads)) {
        _table->add_entry((void*)threads);
      }
    }
  }
};
5009
// Safely free a ThreadsList after a Threads::add() or Threads::remove().
// The specified ThreadsList may not get deleted during this call if it
// is still in-use (referenced by a hazard ptr). Other ThreadsLists
// in the chain may get deleted by this call if they are no longer in-use.
void Threads::smr_free_list(ThreadsList* threads) {
  assert_locked_or_safepoint(Threads_lock);

  // Link 'threads' at the head of the pending-delete chain first; the
  // sweep below may or may not free it on this pass.
  threads->set_next_list(_smr_to_delete_list);
  _smr_to_delete_list = threads;
  if (EnableThreadSMRStatistics) {
    _smr_to_delete_list_cnt++;
    if (_smr_to_delete_list_cnt > _smr_to_delete_list_max) {
      _smr_to_delete_list_max = _smr_to_delete_list_cnt;
    }
  }

  // Hash table size should be first power of two higher than twice the length of the ThreadsList
  int hash_table_size = MIN2(_number_of_threads, 32) << 1;
  hash_table_size--;
  hash_table_size |= hash_table_size >> 1;
  hash_table_size |= hash_table_size >> 2;
  hash_table_size |= hash_table_size >> 4;
  hash_table_size |= hash_table_size >> 8;
  hash_table_size |= hash_table_size >> 16;
  hash_table_size++;

  // Gather a hash table of the current hazard ptrs:
  ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
  ScanHazardPtrGatherThreadsListClosure scan_cl(scan_table);
  Threads::threads_do(&scan_cl);

  // Walk through the linked list of pending freeable ThreadsLists
  // and free the ones that are not referenced from hazard ptrs.
  ThreadsList* current = _smr_to_delete_list;
  ThreadsList* prev = NULL;
  ThreadsList* next = NULL;
  bool threads_is_freed = false;
  while (current != NULL) {
    next = current->next_list();
    if (!scan_table->has_entry((void*)current)) {
      // This ThreadsList is not referenced by a hazard ptr.
      // Unlink it from the chain (head and interior cases).
      if (prev != NULL) {
        prev->set_next_list(next);
      }
      if (_smr_to_delete_list == current) {
        _smr_to_delete_list = next;
      }

      log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_free_list: threads=" INTPTR_FORMAT " is freed.", os::current_thread_id(), p2i(current));
      if (current == threads) threads_is_freed = true;
      delete current;
      if (EnableThreadSMRStatistics) {
        _smr_java_thread_list_free_cnt++;
        _smr_to_delete_list_cnt--;
      }
    } else {
      // Still referenced; keep it and advance 'prev'.
      prev = current;
    }
    current = next;
  }

  if (!threads_is_freed) {
    // Only report "is not freed" on the original call to
    // smr_free_list() for this ThreadsList.
    log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_free_list: threads=" INTPTR_FORMAT " is not freed.", os::current_thread_id(), p2i(threads));
  }

  delete scan_table;
}
5079
// Remove a JavaThread from a ThreadsList. The returned ThreadsList is a
// new copy of the specified ThreadsList with the specified JavaThread
// removed. The original list is left untouched (copy-on-remove).
ThreadsList *ThreadsList::remove_thread(ThreadsList* list, JavaThread* java_thread) {
  assert(list->_length > 0, "sanity");

  // Locate java_thread's index on the list; it must be present.
  uint i = 0;
  DO_JAVA_THREADS(list, current) {
    if (current == java_thread) {
      break;
    }
    i++;
  }
  assert(i < list->_length, "did not find JavaThread on the list");
  const uint index = i;
  const uint new_length = list->_length - 1;
  const uint head_length = index;
  // Guard is belt-and-braces: index <= new_length always holds here
  // because index < _length and new_length == _length - 1.
  const uint tail_length = (new_length >= index) ? (new_length - index) : 0;
  ThreadsList *const new_list = new ThreadsList(new_length);

  // Copy the entries before and after 'index', skipping the removed one.
  if (head_length > 0) {
    Copy::disjoint_words((HeapWord*)list->_threads, (HeapWord*)new_list->_threads, head_length);
  }
  if (tail_length > 0) {
    Copy::disjoint_words((HeapWord*)list->_threads + index + 1, (HeapWord*)new_list->_threads + index, tail_length);
  }

  return new_list;
}
5109
// Add a JavaThread to a ThreadsList. The returned ThreadsList is a
// new copy of the specified ThreadsList with the specified JavaThread
// appended to the end. The original list is left untouched.
ThreadsList *ThreadsList::add_thread(ThreadsList *list, JavaThread *java_thread) {
  const uint index = list->_length;
  const uint new_length = index + 1;
  const uint head_length = index;
  ThreadsList *const new_list = new ThreadsList(new_length);

  // Copy all existing entries, then append the new one at 'index'.
  if (head_length > 0) {
    Copy::disjoint_words((HeapWord*)list->_threads, (HeapWord*)new_list->_threads, head_length);
  }
  // Cast strips constness of the slot so the new entry can be stored.
  *(JavaThread**)(new_list->_threads + index) = java_thread;

  return new_list;
}
5126
5127 int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
5128 if (target == NULL) {
5129 return -1;
5130 }
5131 for (uint i = 0; i < length(); i++) {
5132 if (target == thread_at(i)) {
5133 return (int)i;
5134 }
5135 }
5136 return -1;
5137 }
5138
5139 JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
5140 DO_JAVA_THREADS(this, thread) {
5141 oop tobj = thread->threadObj();
5142 // Ignore the thread if it hasn't run yet, has exited
5143 // or is starting to exit.
5144 if (tobj != NULL && !thread->is_exiting() &&
5145 java_tid == java_lang_Thread::thread_id(tobj)) {
5146 // found a match
5147 return thread;
5148 }
5149 }
5150 return NULL;
5151 }
5152
5153 bool ThreadsList::includes(const JavaThread * const p) const {
5154 if (p == NULL) {
5155 return false;
5156 }
5157 DO_JAVA_THREADS(this, q) {
5158 if (q == p) {
5159 return true;
5160 }
5161 }
5162 return false;
5163 }
5164
// Publish a newly created JavaThread: link it onto the internal
// _thread_list, update counters, and install a new SMR ThreadsList copy
// that includes it. Caller must hold the Threads_lock.
void Threads::add(JavaThread* p, bool force_daemon) {
  // The threads lock must be owned at this point
  assert_locked_or_safepoint(Threads_lock);

  // See the comment for this method in thread.hpp for its purpose and
  // why it is called here.
  p->initialize_queues();
  p->set_next(_thread_list);
  _thread_list = p;

  // Once a JavaThread is added to the Threads list, smr_delete() has
  // to be used to delete it. Otherwise we can just delete it directly.
  p->set_on_thread_list();

  _number_of_threads++;
  oop threadObj = p->threadObj();
  bool daemon = true;
  // Bootstrapping problem: threadObj can be null for initial
  // JavaThread (or for threads attached via JNI)
  if ((!force_daemon) && (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj))) {
    _number_of_non_daemon_threads++;
    daemon = false;
  }

  ThreadService::add_thread(p, daemon);

  // Maintain fast thread list
  ThreadsList *new_list = ThreadsList::add_thread(get_smr_java_thread_list(), p);
  if (EnableThreadSMRStatistics) {
    _smr_java_thread_list_alloc_cnt++;
    if (new_list->length() > _smr_java_thread_list_max) {
      _smr_java_thread_list_max = new_list->length();
    }
  }
  // Initial _smr_java_thread_list will not generate a "Threads::add" mesg.
  log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::add: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));

  // Swap in the new list and queue the old one for hazard-ptr-safe freeing.
  ThreadsList *old_list = xchg_smr_java_thread_list(new_list);
  smr_free_list(old_list);

  // Possible GC point.
  Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
}
5208
5209 void Threads::remove(JavaThread* p) {
5210
5211 // Reclaim the objectmonitors from the omInUseList and omFreeList of the moribund thread.
5212 ObjectSynchronizer::omFlush(p);
5213
5214 // Extra scope needed for Thread_lock, so we can check
5215 // that we do not remove thread without safepoint code notice
5216 { MutexLocker ml(Threads_lock);
5217
5218 assert(get_smr_java_thread_list()->includes(p), "p must be present");
5219
5220 // Maintain fast thread list
5221 ThreadsList *new_list = ThreadsList::remove_thread(get_smr_java_thread_list(), p);
5222 if (EnableThreadSMRStatistics) {
5223 _smr_java_thread_list_alloc_cnt++;
5224 // This list is smaller so no need to check for a "longest" update.
5225 }
5226
5227 // Final _smr_java_thread_list will not generate a "Threads::remove" mesg.
5228 log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::remove: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
5229
5230 ThreadsList *old_list = xchg_smr_java_thread_list(new_list);
5231 smr_free_list(old_list);
5232
5233 JavaThread* current = _thread_list;
5234 JavaThread* prev = NULL;
5235
5236 while (current != p) {
5237 prev = current;
5238 current = current->next();
5239 }
5240
5241 if (prev) {
5242 prev->set_next(current->next());
5243 } else {
5244 _thread_list = p->next();
5245 }
5246
5247 _number_of_threads--;
5248 oop threadObj = p->threadObj();
5249 bool daemon = true;
5250 if (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj)) {
5251 _number_of_non_daemon_threads--;
// Threads::print_on() is called at safepoint by VM_PrintThreads operation.
// Produces the "Full thread dump" report: all JavaThreads (optionally with
// stacks and java.util.concurrent lock info), then the VM, GC, and watcher
// threads.
void Threads::print_on(outputStream* st, bool print_stacks,
                       bool internal_format, bool print_concurrent_locks) {
  char buf[32];
  st->print_raw_cr(os::local_time_string(buf, sizeof(buf)));

  st->print_cr("Full thread dump %s (%s %s):",
               Abstract_VM_Version::vm_name(),
               Abstract_VM_Version::vm_release(),
               Abstract_VM_Version::vm_info_string());
  st->cr();

#if INCLUDE_SERVICES
  // Dump concurrent locks
  ConcurrentLocksDump concurrent_locks;
  if (print_concurrent_locks) {
    concurrent_locks.dump_at_safepoint();
  }
#endif // INCLUDE_SERVICES

  // SMR bookkeeping first so the list snapshot context is visible.
  print_smr_info_on(st);
  st->cr();

  ALL_JAVA_THREADS(p) {
    ResourceMark rm;
    p->print_on(st);
    if (print_stacks) {
      if (internal_format) {
        // VM-internal stack format (no Java frames translation).
        p->trace_stack();
      } else {
        p->print_stack_on(st);
      }
    }
    st->cr();
#if INCLUDE_SERVICES
    if (print_concurrent_locks) {
      concurrent_locks.print_locks_on(p, st);
    }
#endif // INCLUDE_SERVICES
  }

  // Non-Java threads: VM thread, GC worker threads, watcher thread.
  VMThread::vm_thread()->print_on(st);
  st->cr();
  Universe::heap()->print_gc_threads_on(st);
  WatcherThread* wt = WatcherThread::watcher_thread();
  if (wt != NULL) {
    wt->print_on(st);
    st->cr();
  }

  st->flush();
}
5488
5489 // Log Threads class SMR info.
5490 void Threads::log_smr_statistics() {
5491 LogTarget(Info, thread, smr) log;
5492 if (log.is_enabled()) {
5493 LogStream out(log);
5494 print_smr_info_on(&out);
5495 }
5496 }
5497
// Print Threads class SMR info: the current ThreadsList, the chain of
// ThreadsLists pending deletion, and (when EnableThreadSMRStatistics)
// the allocation/free/wait counters.
void Threads::print_smr_info_on(outputStream* st) {
  // Only grab the Threads_lock if we don't already own it
  // and if we are not reporting an error.
  // NOTE(review): skipping the lock during error reporting trades
  // consistency for robustness of the error report.
  MutexLockerEx ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);

  st->print_cr("Threads class SMR info:");
  st->print_cr("_smr_java_thread_list=" INTPTR_FORMAT ", length=%u, "
               "elements={", p2i(_smr_java_thread_list),
               _smr_java_thread_list->length());
  print_smr_info_elements_on(st, _smr_java_thread_list);
  st->print_cr("}");
  if (_smr_to_delete_list != NULL) {
    // Dump the head of the to-delete chain, then each list behind it.
    st->print_cr("_smr_to_delete_list=" INTPTR_FORMAT ", length=%u, "
                 "elements={", p2i(_smr_to_delete_list),
                 _smr_to_delete_list->length());
    print_smr_info_elements_on(st, _smr_to_delete_list);
    st->print_cr("}");
    for (ThreadsList *t_list = _smr_to_delete_list->next_list();
         t_list != NULL; t_list = t_list->next_list()) {
      st->print("next-> " INTPTR_FORMAT ", length=%u, "
                "elements={", p2i(t_list), t_list->length());
      print_smr_info_elements_on(st, t_list);
      st->print_cr("}");
    }
  }
  if (!EnableThreadSMRStatistics) {
    return;
  }
  // Statistics section (only meaningful with EnableThreadSMRStatistics).
  st->print_cr("_smr_java_thread_list_alloc_cnt=" UINT64_FORMAT ","
               "_smr_java_thread_list_free_cnt=" UINT64_FORMAT ","
               "_smr_java_thread_list_max=%u, "
               "_smr_nested_thread_list_max=%u",
               _smr_java_thread_list_alloc_cnt,
               _smr_java_thread_list_free_cnt,
               _smr_java_thread_list_max,
               _smr_nested_thread_list_max);
  if (_smr_tlh_cnt > 0) {
    st->print_cr("_smr_tlh_cnt=%u"
                 ", _smr_tlh_times=%u"
                 ", avg_smr_tlh_time=%0.2f"
                 ", _smr_tlh_time_max=%u",
                 _smr_tlh_cnt, _smr_tlh_times,
                 ((double) _smr_tlh_times / _smr_tlh_cnt),
                 _smr_tlh_time_max);
  }
  if (_smr_deleted_thread_cnt > 0) {
    st->print_cr("_smr_deleted_thread_cnt=%u"
                 ", _smr_deleted_thread_times=%u"
                 ", avg_smr_deleted_thread_time=%0.2f"
                 ", _smr_deleted_thread_time_max=%u",
                 _smr_deleted_thread_cnt, _smr_deleted_thread_times,
                 ((double) _smr_deleted_thread_times / _smr_deleted_thread_cnt),
                 _smr_deleted_thread_time_max);
  }
  st->print_cr("_smr_delete_lock_wait_cnt=%u, _smr_delete_lock_wait_max=%u",
               _smr_delete_lock_wait_cnt, _smr_delete_lock_wait_max);
  st->print_cr("_smr_to_delete_list_cnt=%u, _smr_to_delete_list_max=%u",
               _smr_to_delete_list_cnt, _smr_to_delete_list_max);
}
5558
5559 // Print ThreadsList elements (4 per line).
5560 void Threads::print_smr_info_elements_on(outputStream* st,
5561 ThreadsList* t_list) {
5562 uint cnt = 0;
5563 JavaThreadIterator jti(t_list);
5564 for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
5565 st->print(INTPTR_FORMAT, p2i(jt));
5566 if (cnt < t_list->length() - 1) {
5567 // Separate with comma or comma-space except for the last one.
5568 if (((cnt + 1) % 4) == 0) {
5569 // Four INTPTR_FORMAT fit on an 80 column line so end the
5570 // current line with just a comma.
5571 st->print_cr(",");
5572 } else {
5573 // Not the last one on the current line so use comma-space:
5574 st->print(", ");
5575 }
5576 } else {
5577 // Last one so just end the current line.
5578 st->cr();
5579 }
5580 cnt++;
5581 }
5582 }
5583
5584 void Threads::print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
5585 int buflen, bool* found_current) {
5586 if (this_thread != NULL) {
5587 bool is_current = (current == this_thread);
5588 *found_current = *found_current || is_current;
5589 st->print("%s", is_current ? "=>" : " ");
5590
5591 st->print(PTR_FORMAT, p2i(this_thread));
5592 st->print(" ");
5593 this_thread->print_on_error(st, buf, buflen);
5594 st->cr();
5595 }
5596 }
5597
// Adapter closure: forwards each thread to the single-thread
// Threads::print_on_error() overload so gc_threads_do() and friends can
// contribute to the error report.
class PrintOnErrorClosure : public ThreadClosure {
  outputStream* _st;        // destination stream for the report
  Thread* _current;         // thread to mark with "=>"
  char* _buf;               // caller-supplied scratch buffer
  int _buflen;
  bool* _found_current;     // latched true once _current is printed
 public:
  PrintOnErrorClosure(outputStream* st, Thread* current, char* buf,
                      int buflen, bool* found_current) :
   _st(st), _current(current), _buf(buf), _buflen(buflen), _found_current(found_current) {}

  virtual void do_thread(Thread* thread) {
    Threads::print_on_error(thread, _st, _current, _buf, _buflen, _found_current);
  }
};
5613
5614 // Threads::print_on_error() is called by fatal error handler. It's possible
5615 // that VM is not at safepoint and/or current thread is inside signal handler.
5616 // Don't print stack trace, as the stack may not be walkable. Don't allocate
5617 // memory (even in resource area), it might deadlock the error handler.
5618 void Threads::print_on_error(outputStream* st, Thread* current, char* buf,
5619 int buflen) {
5620 print_smr_info_on(st);
5621 st->cr();
5622
5623 bool found_current = false;
5624 st->print_cr("Java Threads: ( => current thread )");
5625 ALL_JAVA_THREADS(thread) {
5626 print_on_error(thread, st, current, buf, buflen, &found_current);
5627 }
5628 st->cr();
5629
5630 st->print_cr("Other Threads:");
5631 print_on_error(VMThread::vm_thread(), st, current, buf, buflen, &found_current);
5632 print_on_error(WatcherThread::watcher_thread(), st, current, buf, buflen, &found_current);
5633
5634 PrintOnErrorClosure print_closure(st, current, buf, buflen, &found_current);
5635 Universe::heap()->gc_threads_do(&print_closure);
5636
5637 if (!found_current) {
5638 st->cr();
5639 st->print("=>" PTR_FORMAT " (exited) ", p2i(current));
5640 current->print_on_error(st, buf, buflen);
|
90 #include "runtime/timer.hpp"
91 #include "runtime/timerTrace.hpp"
92 #include "runtime/vframe.hpp"
93 #include "runtime/vframeArray.hpp"
94 #include "runtime/vframe_hp.hpp"
95 #include "runtime/vmThread.hpp"
96 #include "runtime/vm_operations.hpp"
97 #include "runtime/vm_version.hpp"
98 #include "services/attachListener.hpp"
99 #include "services/management.hpp"
100 #include "services/memTracker.hpp"
101 #include "services/threadService.hpp"
102 #include "trace/traceMacros.hpp"
103 #include "trace/tracing.hpp"
104 #include "utilities/align.hpp"
105 #include "utilities/defaultStream.hpp"
106 #include "utilities/dtrace.hpp"
107 #include "utilities/events.hpp"
108 #include "utilities/macros.hpp"
109 #include "utilities/preserveException.hpp"
110 #include "utilities/vmError.hpp"
111 #if INCLUDE_ALL_GCS
112 #include "gc/cms/concurrentMarkSweepThread.hpp"
113 #include "gc/g1/concurrentMarkThread.inline.hpp"
114 #include "gc/parallel/pcTasks.hpp"
115 #endif // INCLUDE_ALL_GCS
116 #if INCLUDE_JVMCI
117 #include "jvmci/jvmciCompiler.hpp"
118 #include "jvmci/jvmciRuntime.hpp"
119 #include "logging/logHandle.hpp"
120 #endif
121 #ifdef COMPILER1
122 #include "c1/c1_Compiler.hpp"
123 #endif
124 #ifdef COMPILER2
125 #include "opto/c2compiler.hpp"
126 #include "opto/idealGraphPrinter.hpp"
127 #endif
128 #if INCLUDE_RTM_OPT
129 #include "runtime/rtmLocking.hpp"
189 p2i(aligned_addr));
190 }
191 ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
192 return aligned_addr;
193 } else {
194 return throw_excpt? AllocateHeap(size, flags, CURRENT_PC)
195 : AllocateHeap(size, flags, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
196 }
197 }
198
199 void Thread::operator delete(void* p) {
200 if (UseBiasedLocking) {
201 FreeHeap(((Thread*) p)->_real_malloc_address);
202 } else {
203 FreeHeap(p);
204 }
205 }
206
207 void JavaThread::smr_delete() {
208 if (_on_thread_list) {
209 ThreadsSMRSupport::smr_delete(this);
210 } else {
211 delete this;
212 }
213 }
214
215 // Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread,
216 // JavaThread
217
218
219 Thread::Thread() {
220 // stack and get_thread
221 set_stack_base(NULL);
222 set_stack_size(0);
223 set_self_raw_id(0);
224 set_lgrp_id(-1);
225 DEBUG_ONLY(clear_suspendible_thread();)
226
227 // allocated data structures
228 set_osthread(NULL);
229 set_resource_area(new (mtThread)ResourceArea());
401 clear_thread_current();
402 }
403
404 CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
405 }
406
// NOTE: dummy function for assertion purpose.
void Thread::run() {
  // Subclasses provide the real entry point; reaching here is a bug.
  ShouldNotReachHere();
}
411
#ifdef ASSERT
// A JavaThread is considered "dangling" if it is not the current
// thread, has been added the Threads list, the system is not at a
// safepoint and the Thread is not "protected".
//
void Thread::check_for_dangling_thread_pointer(Thread *thread) {
  // Any one of these makes the pointer safe to use:
  //  - not a JavaThread,
  //  - it is the current thread,
  //  - it was never published on the Threads list,
  //  - we are at a safepoint, or
  //  - a ThreadsList hazard ptr protects it.
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         !((JavaThread *) thread)->on_thread_list() ||
         SafepointSynchronize::is_at_safepoint() ||
         ThreadsSMRSupport::is_a_protected_JavaThread_with_lock((JavaThread *) thread),
         "possibility of dangling Thread pointer");
}
#endif
425
// Query the OS priority of 'thread'. The OS error code is deliberately
// ignored; the assert checks the value lies within the Java range.
ThreadPriority Thread::get_priority(const Thread* const thread) {
  ThreadPriority priority;
  // Can return an error!
  (void)os::get_priority(thread, priority);
  assert(MinPriority <= priority && priority <= MaxPriority, "non-Java priority found");
  return priority;
}
433
// Ask the OS to change 'thread's priority; best-effort (error ignored).
void Thread::set_priority(Thread* thread, ThreadPriority priority) {
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Can return an error!
  (void)os::set_priority(thread, priority);
}
439
440
441 void Thread::start(Thread* thread) {
3431
3432 // ======= Threads ========
3433
3434 // The Threads class links together all active threads, and provides
3435 // operations over all threads. It is protected by the Threads_lock,
3436 // which is also used in other global contexts like safepointing.
3437 // ThreadsListHandles are used to safely perform operations on one
3438 // or more threads without the risk of the thread exiting during the
3439 // operation.
3440 //
3441 // Note: The Threads_lock is currently more widely used than we
3442 // would like. We are actively migrating Threads_lock uses to other
3443 // mechanisms in order to reduce Threads_lock contention.
3444
JavaThread* Threads::_thread_list = NULL;  // head of the singly-linked JavaThread list
int Threads::_number_of_threads = 0;  // JavaThreads currently on the list
int Threads::_number_of_non_daemon_threads = 0;  // subset that are non-daemon
int Threads::_return_code = 0;  // presumably the VM exit code; verify against set sites
int Threads::_thread_claim_parity = 0;  // parity token for claim-based thread iteration — TODO confirm
size_t JavaThread::_stack_size_at_create = 0;

#ifdef ASSERT
bool Threads::_vm_complete = false;  // debug-only; presumably set once VM init completes — verify
#endif
3455
// Issue a prefetch 'prefetch_interval' bytes ahead of 'addr' and return
// the pointer stored at 'addr'. Used by DO_JAVA_THREADS to hide memory
// latency while scanning a ThreadsList.
static inline void *prefetch_and_load_ptr(void **addr, intx prefetch_interval) {
  Prefetch::read((void*)addr, prefetch_interval);
  return *addr;
}
3460
// Possibly the ugliest for loop the world has seen. C++ does not allow
// multiple types in the declaration section of the for loop. In this case
// we are only dealing with pointers and hence can cast them. It looks ugly
// but macros are ugly and therefore it's fine to make things absurdly ugly.
//
// Iterates X (a JavaThread*) over every element of LIST (a ThreadsList*),
// prefetching PrefetchScanIntervalInBytes ahead of the scan position on
// each step via prefetch_and_load_ptr().
#define DO_JAVA_THREADS(LIST, X) \
  for (JavaThread *MACRO_scan_interval = (JavaThread*)(uintptr_t)PrefetchScanIntervalInBytes, \
    *MACRO_list = (JavaThread*)(LIST), \
    **MACRO_end = ((JavaThread**)((ThreadsList*)MACRO_list)->threads()) + ((ThreadsList*)MACRO_list)->length(), \
    **MACRO_current_p = (JavaThread**)((ThreadsList*)MACRO_list)->threads(), \
    *X = (JavaThread*)prefetch_and_load_ptr((void**)MACRO_current_p, (intx)MACRO_scan_interval); \
    MACRO_current_p != MACRO_end; \
    MACRO_current_p++, \
    X = (JavaThread*)prefetch_and_load_ptr((void**)MACRO_current_p, (intx)MACRO_scan_interval))

// All JavaThreads on the current SMR-managed ThreadsList.
#define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(ThreadsSMRSupport::get_smr_java_thread_list(), X)
3477
3478 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3479 void Threads::threads_do(ThreadClosure* tc) {
3480 assert_locked_or_safepoint(Threads_lock);
3481 // ALL_JAVA_THREADS iterates through all JavaThreads
3482 ALL_JAVA_THREADS(p) {
3483 tc->do_thread(p);
3484 }
3485 // Someday we could have a table or list of all non-JavaThreads.
3486 // For now, just manually iterate through them.
3487 tc->do_thread(VMThread::vm_thread());
3488 Universe::heap()->gc_threads_do(tc);
3489 WatcherThread *wt = WatcherThread::watcher_thread();
3490 // Strictly speaking, the following NULL check isn't sufficient to make sure
3491 // the data for WatcherThread is still valid upon being examined. However,
3492 // considering that WatchThread terminates when the VM is on the way to
3493 // exit at safepoint, the chance of the above is extremely small. The right
3494 // way to prevent termination of WatcherThread would be to acquire
3495 // Terminator_lock, but we can't do that without violating the lock rank
3496 // checking in some cases.
3557 if (result.get_jint() != JNI_OK) {
3558 vm_exit_during_initialization(); // no message or exception
3559 }
3560
3561 universe_post_module_init();
3562 }
3563
// Phase 3. final setup - set security manager, system class loader and TCCL
//
// This will instantiate and set the security manager, set the system class
// loader as well as the thread context class loader. The security manager
// and system class loader may be a custom class loaded from -Xbootclasspath/a,
// other modules or the application's classpath.
static void call_initPhase3(TRAPS) {
  // Resolve java.lang.System and invoke its static void initPhase3();
  // any pending exception propagates to the caller via the CHECK macros.
  Klass* klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
  JavaValue result(T_VOID);
  JavaCalls::call_static(&result, klass, vmSymbols::initPhase3_name(),
                                         vmSymbols::void_method_signature(), CHECK);
}
3576
3577 void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
3578 TraceTime timer("Initialize java.lang classes", TRACETIME_LOG(Info, startuptime));
3579
3580 if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
3581 create_vm_init_libraries();
3582 }
3583
3584 initialize_class(vmSymbols::java_lang_String(), CHECK);
3585
3586 // Inject CompactStrings value after the static initializers for String ran.
3587 java_lang_String::set_compact_strings(CompactStrings);
3588
3589 // Initialize java_lang.System (needed before creating the thread)
3590 initialize_class(vmSymbols::java_lang_System(), CHECK);
3591 // The VM creates & returns objects of this class. Make sure it's initialized.
3592 initialize_class(vmSymbols::java_lang_Class(), CHECK);
3593 initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK);
3594 Handle thread_group = create_initial_thread_group(CHECK);
3595 Universe::set_main_thread_group(thread_group());
3596 initialize_class(vmSymbols::java_lang_Thread(), CHECK);
4322 return true;
4323 }
4324
4325
4326 jboolean Threads::is_supported_jni_version_including_1_1(jint version) {
4327 if (version == JNI_VERSION_1_1) return JNI_TRUE;
4328 return is_supported_jni_version(version);
4329 }
4330
4331
4332 jboolean Threads::is_supported_jni_version(jint version) {
4333 if (version == JNI_VERSION_1_2) return JNI_TRUE;
4334 if (version == JNI_VERSION_1_4) return JNI_TRUE;
4335 if (version == JNI_VERSION_1_6) return JNI_TRUE;
4336 if (version == JNI_VERSION_1_8) return JNI_TRUE;
4337 if (version == JNI_VERSION_9) return JNI_TRUE;
4338 if (version == JNI_VERSION_10) return JNI_TRUE;
4339 return JNI_FALSE;
4340 }
4341
4342
// Publish a newly created JavaThread: link it onto the internal
// _thread_list, update counters, and hand it to ThreadsSMRSupport so the
// fast SMR ThreadsList includes it. Caller must hold the Threads_lock.
void Threads::add(JavaThread* p, bool force_daemon) {
  // The threads lock must be owned at this point
  assert_locked_or_safepoint(Threads_lock);

  // See the comment for this method in thread.hpp for its purpose and
  // why it is called here.
  p->initialize_queues();
  p->set_next(_thread_list);
  _thread_list = p;

  // Once a JavaThread is added to the Threads list, smr_delete() has
  // to be used to delete it. Otherwise we can just delete it directly.
  p->set_on_thread_list();

  _number_of_threads++;
  oop threadObj = p->threadObj();
  bool daemon = true;
  // Bootstrapping problem: threadObj can be null for initial
  // JavaThread (or for threads attached via JNI)
  if ((!force_daemon) && (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj))) {
    _number_of_non_daemon_threads++;
    daemon = false;
  }

  ThreadService::add_thread(p, daemon);

  // Maintain fast thread list
  ThreadsSMRSupport::add_thread(p);

  // Possible GC point.
  Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
}
4375
4376 void Threads::remove(JavaThread* p) {
4377
4378 // Reclaim the objectmonitors from the omInUseList and omFreeList of the moribund thread.
4379 ObjectSynchronizer::omFlush(p);
4380
4381 // Extra scope needed for Thread_lock, so we can check
4382 // that we do not remove thread without safepoint code notice
4383 { MutexLocker ml(Threads_lock);
4384
4385 assert(ThreadsSMRSupport::get_smr_java_thread_list()->includes(p), "p must be present");
4386
4387 // Maintain fast thread list
4388 ThreadsSMRSupport::remove_thread(p);
4389
4390 JavaThread* current = _thread_list;
4391 JavaThread* prev = NULL;
4392
4393 while (current != p) {
4394 prev = current;
4395 current = current->next();
4396 }
4397
4398 if (prev) {
4399 prev->set_next(current->next());
4400 } else {
4401 _thread_list = p->next();
4402 }
4403
4404 _number_of_threads--;
4405 oop threadObj = p->threadObj();
4406 bool daemon = true;
4407 if (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj)) {
4408 _number_of_non_daemon_threads--;
// Threads::print_on() is called at safepoint by VM_PrintThreads operation.
// Produces a full thread dump on 'st': timestamp and VM banner, SMR info,
// then every Java thread (optionally with stacks and owned concurrent
// locks), followed by the VM thread, GC threads, and the watcher thread.
void Threads::print_on(outputStream* st, bool print_stacks,
                       bool internal_format, bool print_concurrent_locks) {
  char buf[32];
  st->print_raw_cr(os::local_time_string(buf, sizeof(buf)));

  st->print_cr("Full thread dump %s (%s %s):",
               Abstract_VM_Version::vm_name(),
               Abstract_VM_Version::vm_release(),
               Abstract_VM_Version::vm_info_string());
  st->cr();

#if INCLUDE_SERVICES
  // Dump concurrent locks
  ConcurrentLocksDump concurrent_locks;
  if (print_concurrent_locks) {
    concurrent_locks.dump_at_safepoint();
  }
#endif // INCLUDE_SERVICES

  ThreadsSMRSupport::print_smr_info_on(st);
  st->cr();

  ALL_JAVA_THREADS(p) {
    // ResourceMark scopes any resource-area allocation done while
    // printing a single thread.
    ResourceMark rm;
    p->print_on(st);
    if (print_stacks) {
      if (internal_format) {
        p->trace_stack();
      } else {
        p->print_stack_on(st);
      }
    }
    st->cr();
#if INCLUDE_SERVICES
    if (print_concurrent_locks) {
      concurrent_locks.print_locks_on(p, st);
    }
#endif // INCLUDE_SERVICES
  }

  // Non-Java threads: VM thread, GC worker threads, watcher (if running).
  VMThread::vm_thread()->print_on(st);
  st->cr();
  Universe::heap()->print_gc_threads_on(st);
  WatcherThread* wt = WatcherThread::watcher_thread();
  if (wt != NULL) {
    wt->print_on(st);
    st->cr();
  }

  st->flush();
}
4645
4646 void Threads::print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
4647 int buflen, bool* found_current) {
4648 if (this_thread != NULL) {
4649 bool is_current = (current == this_thread);
4650 *found_current = *found_current || is_current;
4651 st->print("%s", is_current ? "=>" : " ");
4652
4653 st->print(PTR_FORMAT, p2i(this_thread));
4654 st->print(" ");
4655 this_thread->print_on_error(st, buf, buflen);
4656 st->cr();
4657 }
4658 }
4659
// ThreadClosure adapter that forwards each visited thread to
// Threads::print_on_error(); used to print GC worker threads during
// error reporting (see the gc_threads_do() call below).
class PrintOnErrorClosure : public ThreadClosure {
  outputStream* _st;            // destination stream
  Thread* _current;             // thread to mark with "=>"
  char* _buf;                   // caller-supplied scratch buffer
  int _buflen;                  // size of _buf
  bool* _found_current;         // out-flag: set if _current was visited
 public:
  PrintOnErrorClosure(outputStream* st, Thread* current, char* buf,
                      int buflen, bool* found_current) :
   _st(st), _current(current), _buf(buf), _buflen(buflen), _found_current(found_current) {}

  virtual void do_thread(Thread* thread) {
    Threads::print_on_error(thread, _st, _current, _buf, _buflen, _found_current);
  }
};
4675
4676 // Threads::print_on_error() is called by fatal error handler. It's possible
4677 // that VM is not at safepoint and/or current thread is inside signal handler.
4678 // Don't print stack trace, as the stack may not be walkable. Don't allocate
4679 // memory (even in resource area), it might deadlock the error handler.
4680 void Threads::print_on_error(outputStream* st, Thread* current, char* buf,
4681 int buflen) {
4682 ThreadsSMRSupport::print_smr_info_on(st);
4683 st->cr();
4684
4685 bool found_current = false;
4686 st->print_cr("Java Threads: ( => current thread )");
4687 ALL_JAVA_THREADS(thread) {
4688 print_on_error(thread, st, current, buf, buflen, &found_current);
4689 }
4690 st->cr();
4691
4692 st->print_cr("Other Threads:");
4693 print_on_error(VMThread::vm_thread(), st, current, buf, buflen, &found_current);
4694 print_on_error(WatcherThread::watcher_thread(), st, current, buf, buflen, &found_current);
4695
4696 PrintOnErrorClosure print_closure(st, current, buf, buflen, &found_current);
4697 Universe::heap()->gc_threads_do(&print_closure);
4698
4699 if (!found_current) {
4700 st->cr();
4701 st->print("=>" PTR_FORMAT " (exited) ", p2i(current));
4702 current->print_on_error(st, buf, buflen);
|