1 /* 2 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

// Perf counters (for java.lang.management) and their raw atomic shadows.
// The atomic copies exist so that counts can be read without taking locks;
// note the ordering contract asserted in remove_thread(): atomic counts are
// decremented before the perf counts.
PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

// Head of the global singly-linked list of in-progress thread dumps,
// guarded by Management_lock (see add_thread_dump/remove_thread_dump).
ThreadDumpResult* ThreadService::_threaddump_list = NULL;

// Initial capacity for the various C-heap GrowableArrays below.
static const int INITIAL_ARRAY_SIZE = 10;

// One-time initialization: create the thread perf counters and enable the
// optional monitoring features that the platform supports.
void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it
}

// Reset the recorded peak to the current live count (ThreadMXBean
// resetPeakThreadCount support).
void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

// Account for a newly added JavaThread. Caller must hold Threads_lock, which
// also serializes the peak-count update against reset_peak_thread_count().
void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

// Decrement only the lock-free atomic counters. Called either from
// current_thread_exiting() (normal exit path) or from remove_thread()
// when the exit path was skipped.
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

// Account for a JavaThread being removed. Caller must hold Threads_lock.
// The perf counters are decremented here; the atomic counters were usually
// already decremented in current_thread_exiting().
void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
           "thread count mismatch %d : %d",
           (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
         "thread count mismatch %d : %d",
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
         (_live_threads_count->get_value() == 0 && count == 0 &&
          _daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
         (int)_live_threads_count->get_value(), count,
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
         (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, daemon %d,%d",
         (int)_daemon_threads_count->get_value(), daemon_count);
}

// Called by the exiting thread itself; drops the atomic counts early so that
// lock-free readers stop counting this thread before it is fully removed.
void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
// Return the object this thread is waiting on (Object.wait) or blocked
// entering, or NULL. The result may be stale -- see the comment below.
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = (oop) wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = (oop) enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

// The three setters below toggle optional monitoring features under
// Management_lock and return the previous setting (ThreadMXBean contract).

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

// Visit the Metadata (Method*) referenced by every in-progress thread dump,
// keeping it alive across class redefinition/unloading.
void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

// Push a ThreadDumpResult onto the global list (guarded by Management_lock).
void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

// Unlink a ThreadDumpResult from the global list (guarded by Management_lock).
void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump stack trace of threads specified in the given threads array.
// Returns StackTraceElement[][] each element is the stack trace of a thread in
// the corresponding entry in the given threads array
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  // Collect the snapshots inside a VM operation (at a safepoint).
  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

// Reset per-thread contention count statistics (if any were recorded).
void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

// Reset per-thread contention time statistics (if any were recorded).
void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
// Uses a depth-first walk of the thread waits-for graph; each cycle found is
// returned as a DeadlockCycle on the linked list handed back to the caller.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    // Follow the waits-for edge from currentThread to the owner of whatever
    // it is blocked on, until the chain ends or closes into a cycle.
    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          // only JavaThreads can be reported here
          currentThread = (JavaThread*) owner;
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  // Free the spare cycle that was allocated but never added to the list.
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

// Append an empty snapshot (no JavaThread association).
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

// Append a snapshot initialized from the given JavaThread.
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

// Link a snapshot onto the tail of this result's snapshot list.
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

// Capture one stack frame: its Method*/bci, a strong root keeping the
// method's class loader alive, and (optionally) the monitors it has locked.
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(Universe::vm_global(), _method->method_holder()->klass_holder());
  _locked_monitors = NULL;
  if (with_lock_info) {
    ResourceMark rm;
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(Universe::vm_global(), monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  // Release every OopHandle allocated in the constructor.
  if (_locked_monitors != NULL) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(Universe::vm_global());
    }
    delete _locked_monitors;
  }
  _class_holder.release(Universe::vm_global());
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

// Print this frame in thread-dump format, followed by its locked monitors.
void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
  Thread* _thread;
public:
  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
    _thread = t;
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    // A monitor owned by the thread but not found in any stack frame must
    // have been locked via JNI MonitorEnter.
    if (mid->owner() == _thread) {
      oop object = (oop) mid->object();
      if (!_stack_trace->is_owned_monitor_on_stack(object)) {
        _stack_trace->add_jni_locked_monitor(object);
      }
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(Universe::vm_global(), object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(Universe::vm_global());
    }
    delete _jni_locked_monitors;
  }
}

// Walk the thread's Java frames (up to maxDepth; maxDepth < 0 means all)
// and record them; then, if requested, pick up JNI-locked monitors that do
// not appear in any recorded frame.
void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(_thread, this);
    ObjectSynchronizer::monitors_iterate(&imc);
  }
}


// Return true if 'object' is locked by one of the recorded stack frames.
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

// Materialize the recorded frames as a java.lang.StackTraceElement[].
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = SystemDictionary::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  // The map may be handed off to a longer-lived owner; in that case the
  // entries must not be freed here.
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;)  {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

// Record that 'thread' owns synchronizer 'o', creating the thread's entry
// on first use and appending it to the map's tail otherwise.
void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

// Linear lookup of the entry for 'thread', or NULL if it owns no locks.
ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

// Print the ownable synchronizers held by thread 't' in thread-dump format.
void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  // Release every OopHandle allocated via add_lock().
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(Universe::vm_global());
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(Universe::vm_global(), o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

// Capture the state of 'thread' for a thread dump: its java.lang.Thread oop,
// contention statistics, status, and -- if blocked/waiting/parked -- the
// blocker object and that blocker's owner.
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(Universe::vm_global(), threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  _thread_status = java_lang_Thread::get_thread_status(threadObj);
  _is_ext_suspended = thread->is_being_ext_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  oop blocker_object = NULL;
  oop blocker_object_owner = NULL;

  if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {

    Handle obj = ThreadService::get_current_contended_monitor(thread);
    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = java_lang_Thread::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // ownership information of the monitor is not available
        // (may no longer be owned or releasing to some other thread)
        // make this thread in RUNNABLE state.
        // And when the owner thread is in attaching state, the java thread
        // is not completely initialized. For example thread name and id
        // and may not be set, so hide the attaching thread.
        _thread_status = java_lang_Thread::RUNNABLE;
        blocker_object = NULL;
      } else if (owner != NULL) {
        blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != NULL && blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != NULL) {
    _blocker_object = OopHandle(Universe::vm_global(), blocker_object);
  }
  if (blocker_object_owner != NULL) {
    _blocker_object_owner = OopHandle(Universe::vm_global(), blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  // Release the strong roots taken in initialize().
  _blocker_object.release(Universe::vm_global());
  _blocker_object_owner.release(Universe::vm_global());
  _threadObj.release(Universe::vm_global());

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete
_threads; 951 } 952 953 void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const { 954 st->cr(); 955 st->print_cr("Found one Java-level deadlock:"); 956 st->print("============================="); 957 958 JavaThread* currentThread; 959 JvmtiRawMonitor* waitingToLockRawMonitor; 960 oop waitingToLockBlocker; 961 int len = _threads->length(); 962 for (int i = 0; i < len; i++) { 963 currentThread = _threads->at(i); 964 // The ObjectMonitor* can't be async deflated since we are at a safepoint. 965 ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor(); 966 waitingToLockRawMonitor = currentThread->current_pending_raw_monitor(); 967 waitingToLockBlocker = currentThread->current_park_blocker(); 968 st->cr(); 969 st->print_cr("\"%s\":", currentThread->get_thread_name()); 970 const char* owner_desc = ",\n which is held by"; 971 972 // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor 973 // sets the current pending monitor, it is possible to then see a pending raw monitor as well. 
974 if (waitingToLockRawMonitor != NULL) { 975 st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor)); 976 Thread* owner = waitingToLockRawMonitor->owner(); 977 // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread 978 if (owner != NULL) { 979 if (owner->is_Java_thread()) { 980 currentThread = (JavaThread*) owner; 981 st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name()); 982 } else { 983 st->print_cr(",\n which has now been released"); 984 } 985 } else { 986 st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner)); 987 } 988 } 989 990 if (waitingToLockMonitor != NULL) { 991 st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor)); 992 oop obj = (oop)waitingToLockMonitor->object(); 993 st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj), 994 obj->klass()->external_name()); 995 996 if (!currentThread->current_pending_monitor_is_from_java()) { 997 owner_desc = "\n in JNI, which is held by"; 998 } 999 currentThread = Threads::owning_thread_from_monitor_owner(t_list, 1000 (address)waitingToLockMonitor->owner()); 1001 if (currentThread == NULL) { 1002 // The deadlock was detected at a safepoint so the JavaThread 1003 // that owns waitingToLockMonitor should be findable, but 1004 // if it is not findable, then the previous currentThread is 1005 // blocked permanently. 
1006 st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc, 1007 p2i(waitingToLockMonitor->owner())); 1008 continue; 1009 } 1010 } else { 1011 st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)", 1012 p2i(waitingToLockBlocker), 1013 waitingToLockBlocker->klass()->external_name()); 1014 assert(waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()), 1015 "Must be an AbstractOwnableSynchronizer"); 1016 oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker); 1017 currentThread = java_lang_Thread::thread(ownerObj); 1018 assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL"); 1019 } 1020 st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name()); 1021 } 1022 1023 st->cr(); 1024 1025 // Print stack traces 1026 bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace; 1027 JavaMonitorsInStackTrace = true; 1028 st->print_cr("Java stack information for the threads listed above:"); 1029 st->print_cr("==================================================="); 1030 for (int j = 0; j < len; j++) { 1031 currentThread = _threads->at(j); 1032 st->print_cr("\"%s\":", currentThread->get_thread_name()); 1033 currentThread->print_stack_on(st); 1034 } 1035 JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace; 1036 } 1037 1038 ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread, 1039 bool include_jvmti_agent_threads, 1040 bool include_jni_attaching_threads) { 1041 assert(cur_thread == Thread::current(), "Check current thread"); 1042 1043 int init_size = ThreadService::get_live_thread_count(); 1044 _threads_array = new GrowableArray<instanceHandle>(init_size); 1045 1046 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 1047 // skips JavaThreads in the process of exiting 1048 // and also skips VM internal JavaThreads 1049 // Threads in _thread_new or 
// _thread_new_trans state are included.
    // i.e. threads have been started but not yet running.
    if (jt->threadObj() == NULL ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj()) ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    // Record the surviving thread's java.lang.Thread oop as a Handle
    // associated with cur_thread.
    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}