1 /* 2 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Monitoring/cpu-time/allocated-memory sampling flags.
// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

// Perf counters exported for the java.lang.management API.
PerfCounter* ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
// Atomic shadows of the live/daemon counts; readable without a lock.
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

// Singly-linked list of in-progress thread dumps, so GC can visit their oops.
ThreadDumpResult* ThreadService::_threaddump_list = NULL;

// Initial capacity for the various C-heap GrowableArrays below.
static const int INITIAL_ARRAY_SIZE = 10;

// One-time initialization of the thread-management perf counters.
void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.
  _total_threads_count =
      PerfDataManager::create_counter(JAVA_THREADS, "started",
                                      PerfData::U_Events, CHECK);

  _live_threads_count =
      PerfDataManager::create_variable(JAVA_THREADS, "live",
                                       PerfData::U_None, CHECK);

  _peak_threads_count =
      PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                       PerfData::U_None, CHECK);

  _daemon_threads_count =
      PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                       PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it
}

// Resets the recorded peak live-thread count down to the current live count.
void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

// Threads excluded from the management-visible thread counts.
static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

// Accounts for a newly added JavaThread: bumps total/live (and daemon)
// counters and updates the live peak. Caller must hold Threads_lock.
void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

// Decrements only the atomic (lock-free) counters; the perf counters are
// decremented later in remove_thread() under Threads_lock.
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

// Final accounting when a JavaThread is removed from the threads list.
// Caller must hold Threads_lock.
void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
           "thread count mismatch %d : %d",
           (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
         "thread count mismatch %d : %d",
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
         (_live_threads_count->get_value() == 0 && count == 0 &&
          _daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
         (int)_live_threads_count->get_value(), count,
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
         (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, daemon %d,%d",
         (int)_daemon_threads_count->get_value(), daemon_count);
}

// Called by the exiting thread itself (from JavaThread::exit()); performs
// the atomic-count part of the accounting early.
void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// Returns the object the given thread is waiting on (Object.wait) or
// blocked entering, or a NULL handle if neither. Result may be stale.
// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = (oop) wait_obj->object();
    assert(AsyncDeflateIdleMonitors || obj != NULL, "Object.wait() should have an object");
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = (oop) enter_obj->object();
      assert(AsyncDeflateIdleMonitors || obj != NULL, "ObjectMonitor should have an associated object!");
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

// The three setters below toggle a sampling feature under Management_lock
// and return the previous value (java.lang.management contract).
bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

// GC support: visit oops/metadata held by all in-progress thread dumps.
void ThreadService::oops_do(OopClosure* f) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->oops_do(f);
  }
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

// Registers a ThreadDumpResult so GC can visit it while the dump is live.
void
ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    // Prepend to the singly-linked list.
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

// Unlinks a completed ThreadDumpResult from the GC-visible list.
void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump stack trace of threads specified in the given threads array.
// Returns StackTraceElement[][] each element is the stack trace of a thread in
// the corresponding entry in the given threads array
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  // Collect the snapshots inside a VM operation (at a safepoint).
  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

// Clears the contention count statistics for the given thread, if any.
void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

// Clears the contention time statistics for the given thread, if any.
void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  // DFS over the waits-for graph: each thread points at the owner of the
  // resource it is blocked on. A back-edge into the current DFS run
  // (dfn >= thisDfn) closes a deadlock cycle.
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          // only JavaThreads can be reported here
          currentThread = (JavaThread*) owner;
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ?
              java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  // The spare cycle allocated for the next (never-found) deadlock.
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  // Unregister from the GC-visible list first.
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

// Creates and links an empty snapshot (placeholder, no thread data).
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

// Creates, links and initializes a snapshot for the given thread.
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  // Note: it is very important that the ThreadSnapshot* gets linked before
  // ThreadSnapshot::initialize gets called. This is to ensure that
  // ThreadSnapshot::oops_do can get called prior to the field
  // ThreadSnapshot::_threadObj being assigned a value (to prevent a dangling
  // oop).
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

// Appends a snapshot to the tail of the singly-linked snapshot list.
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

// GC support: visit oops/metadata in every snapshot of this dump.
void ThreadDumpResult::oops_do(OopClosure* f) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->oops_do(f);
  }
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

// Captures method/bci (and optionally the monitors locked in this frame)
// from a javaVFrame.
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  // Keep the holder class alive so _method is not unloaded.
  _class_holder =
      _method->method_holder()->klass_holder();
  _locked_monitors = NULL;
  if (with_lock_info) {
    ResourceMark rm;
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      // Copy owners into a C-heap array that outlives the ResourceMark.
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(monitor->owner());
      }
    }
  }
}

// GC support: visit the locked-monitor oops and the class holder.
void StackFrameInfo::oops_do(OopClosure* f) {
  if (_locked_monitors != NULL) {
    int length = _locked_monitors->length();
    for (int i = 0; i < length; i++) {
      f->do_oop((oop*) _locked_monitors->adr_at(i));
    }
  }
  f->do_oop(&_class_holder);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

// Prints this frame in stack-trace format, followed by any monitors
// locked in it.
void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ?
             _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i);
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
  Thread* _thread;
public:
  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
    _thread = t;
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == _thread) {
      oop object = (oop) mid->object();
      // Monitors already attributed to a stack frame are skipped; anything
      // left must have been locked through JNI (MonitorEnter).
      if (!_stack_trace->is_owned_monitor_on_stack(object)) {
        _stack_trace->add_jni_locked_monitor(object);
      }
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    delete _jni_locked_monitors;
  }
}

// Walks the target thread's stack (up to maxDepth frames; -1 = unbounded)
// and records a StackFrameInfo per Java frame.
void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(_thread, this);
    ObjectSynchronizer::monitors_iterate(&imc);
  }
}


// Returns true if the given object is recorded as locked by any frame of
// this stack trace.
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<oop>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j);
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

// Converts this stack trace into a java.lang.StackTraceElement[] object.
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = SystemDictionary::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

// GC support: visit oops in every frame and the JNI-locked monitors.
void ThreadStackTrace::oops_do(OopClosure* f) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->oops_do(f);
  }

  length = (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0);
  for (int j = 0; j < length; j++) {
    f->do_oop((oop*) _jni_locked_monitors->adr_at(j));
  }
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  // Caller may have taken ownership of the map (see _retain_map_on_free).
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;) {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

// Records that `thread` owns synchronizer `o`, creating the per-thread
// entry on first use.
void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

// Linear search of the map for the given thread's entry; NULL if none.
ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

// Prints the ownable synchronizers held by thread t (thread-dump format).
void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr(" Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<instanceOop>* locks = (tcl != NULL ?
                                       tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    instanceOop obj = locks->at(i);
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(o);
}

// GC support: visit the owned synchronizer oops.
void ThreadConcurrentLocks::oops_do(OopClosure* f) {
  int length = _owned_locks->length();
  for (int i = 0; i < length; i++) {
    f->do_oop((oop*) _owned_locks->adr_at(i));
  }
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

// Captures the management-visible state of `thread`: statistics, status,
// and (when blocked/waiting/parked) the blocker object and its owner.
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  _threadObj = thread->threadObj();

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  _thread_status = java_lang_Thread::get_thread_status(_threadObj);
  _is_ext_suspended = thread->is_being_ext_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {

    Handle obj = ThreadService::get_current_contended_monitor(thread);
    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = java_lang_Thread::RUNNABLE;
    } else {
      _blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // ownership information of the monitor is not available
        // (may no longer be owned or releasing to some other thread)
        // make this thread in RUNNABLE state.
        // And when the owner thread is in attaching state, the java thread
        // is not completely initialized. For example thread name and id
        // and may not be set, so hide the attaching thread.
916 _thread_status = java_lang_Thread::RUNNABLE; 917 _blocker_object = NULL; 918 } else if (owner != NULL) { 919 _blocker_object_owner = owner->threadObj(); 920 } 921 } 922 } 923 924 // Support for JSR-166 locks 925 if (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED) { 926 _blocker_object = thread->current_park_blocker(); 927 if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) { 928 _blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object); 929 } 930 } 931 } 932 933 ThreadSnapshot::~ThreadSnapshot() { 934 delete _stack_trace; 935 delete _concurrent_locks; 936 } 937 938 void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) { 939 _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors); 940 _stack_trace->dump_stack_at_safepoint(max_depth); 941 } 942 943 944 void ThreadSnapshot::oops_do(OopClosure* f) { 945 f->do_oop(&_threadObj); 946 f->do_oop(&_blocker_object); 947 f->do_oop(&_blocker_object_owner); 948 if (_stack_trace != NULL) { 949 _stack_trace->oops_do(f); 950 } 951 if (_concurrent_locks != NULL) { 952 _concurrent_locks->oops_do(f); 953 } 954 } 955 956 void ThreadSnapshot::metadata_do(void f(Metadata*)) { 957 if (_stack_trace != NULL) { 958 _stack_trace->metadata_do(f); 959 } 960 } 961 962 963 DeadlockCycle::DeadlockCycle() { 964 _is_deadlock = false; 965 _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability); 966 _next = NULL; 967 } 968 969 DeadlockCycle::~DeadlockCycle() { 970 delete _threads; 971 } 972 973 void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const { 974 st->cr(); 975 st->print_cr("Found one Java-level deadlock:"); 976 st->print("============================="); 977 978 JavaThread* currentThread; 979 JvmtiRawMonitor* 
waitingToLockRawMonitor; 980 oop waitingToLockBlocker; 981 int len = _threads->length(); 982 for (int i = 0; i < len; i++) { 983 currentThread = _threads->at(i); 984 // The ObjectMonitor* can't be async deflated since we are at a safepoint. 985 ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor(); 986 waitingToLockRawMonitor = currentThread->current_pending_raw_monitor(); 987 waitingToLockBlocker = currentThread->current_park_blocker(); 988 st->cr(); 989 st->print_cr("\"%s\":", currentThread->get_thread_name()); 990 const char* owner_desc = ",\n which is held by"; 991 992 // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor 993 // sets the current pending monitor, it is possible to then see a pending raw monitor as well. 994 if (waitingToLockRawMonitor != NULL) { 995 st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor)); 996 Thread* owner = waitingToLockRawMonitor->owner(); 997 // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread 998 if (owner != NULL) { 999 if (owner->is_Java_thread()) { 1000 currentThread = (JavaThread*) owner; 1001 st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name()); 1002 } else { 1003 st->print_cr(",\n which has now been released"); 1004 } 1005 } else { 1006 st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner)); 1007 } 1008 } 1009 1010 if (waitingToLockMonitor != NULL) { 1011 st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor)); 1012 oop obj = (oop)waitingToLockMonitor->object(); 1013 st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj), 1014 obj->klass()->external_name()); 1015 1016 if (!currentThread->current_pending_monitor_is_from_java()) { 1017 owner_desc = "\n in JNI, which is held by"; 1018 } 1019 currentThread = Threads::owning_thread_from_monitor_owner(t_list, 1020 (address)waitingToLockMonitor->owner()); 1021 if 
(currentThread == NULL) { 1022 // The deadlock was detected at a safepoint so the JavaThread 1023 // that owns waitingToLockMonitor should be findable, but 1024 // if it is not findable, then the previous currentThread is 1025 // blocked permanently. 1026 st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc, 1027 p2i(waitingToLockMonitor->owner())); 1028 continue; 1029 } 1030 } else { 1031 st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)", 1032 p2i(waitingToLockBlocker), 1033 waitingToLockBlocker->klass()->external_name()); 1034 assert(waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()), 1035 "Must be an AbstractOwnableSynchronizer"); 1036 oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker); 1037 currentThread = java_lang_Thread::thread(ownerObj); 1038 assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL"); 1039 } 1040 st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name()); 1041 } 1042 1043 st->cr(); 1044 1045 // Print stack traces 1046 bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace; 1047 JavaMonitorsInStackTrace = true; 1048 st->print_cr("Java stack information for the threads listed above:"); 1049 st->print_cr("==================================================="); 1050 for (int j = 0; j < len; j++) { 1051 currentThread = _threads->at(j); 1052 st->print_cr("\"%s\":", currentThread->get_thread_name()); 1053 currentThread->print_stack_on(st); 1054 } 1055 JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace; 1056 } 1057 1058 ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread, 1059 bool include_jvmti_agent_threads, 1060 bool include_jni_attaching_threads) { 1061 assert(cur_thread == Thread::current(), "Check current thread"); 1062 1063 int init_size = ThreadService::get_live_thread_count(); 1064 _threads_array = new 
GrowableArray<instanceHandle>(init_size); 1065 1066 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 1067 // skips JavaThreads in the process of exiting 1068 // and also skips VM internal JavaThreads 1069 // Threads in _thread_new or _thread_new_trans state are included. 1070 // i.e. threads have been started but not yet running. 1071 if (jt->threadObj() == NULL || 1072 jt->is_exiting() || 1073 !java_lang_Thread::is_alive(jt->threadObj()) || 1074 jt->is_hidden_from_external_view()) { 1075 continue; 1076 } 1077 1078 // skip agent threads 1079 if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) { 1080 continue; 1081 } 1082 1083 // skip jni threads in the process of attaching 1084 if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) { 1085 continue; 1086 } 1087 1088 instanceHandle h(cur_thread, (instanceOop) jt->threadObj()); 1089 _threads_array->append(h); 1090 } 1091 }