/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set; in that case,
  // they will be allocated on the C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

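// Decrement the live (and, if applicable, daemon) atomic thread counts.
// Called either from current_thread_exiting() on the exiting thread itself,
// or from remove_thread() if the exiting path was skipped.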
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}

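// Called on the exiting thread itself (see the asserts below) so that the
// atomic counts drop as soon as the thread starts exiting, before it is
// removed from the threads list.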
void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = (oop) wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = (oop) enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

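// The three setters below update the corresponding enable flag under
// Management_lock and return the previous value of the flag.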
bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

// GC support
void ThreadService::oops_do(OopClosure* f) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->oops_do(f);
  }
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

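// Prepend the given ThreadDumpResult to the global list so that its oops and
// metadata stay reachable via ThreadService::oops_do()/metadata_do() while
// the dump result is in use.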
void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump the stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors and object monitors, and also
// concurrent locks if concurrent_locks is true.
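// The search below is a depth-first walk of the waits-for graph: each thread
// is assigned a depth-first number and its owner edges are followed until the
// walk either terminates (no owner, or a thread visited by an earlier walk)
// or revisits a thread from the current walk, which closes a cycle that is
// recorded as a DeadlockCycle.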
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          // only JavaThreads can be reported here
          currentThread = (JavaThread*) owner;
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  // Note: it is very important that the ThreadSnapshot* gets linked before
  // ThreadSnapshot::initialize gets called. This is to ensure that
  // ThreadSnapshot::oops_do can get called prior to the field
  // ThreadSnapshot::_threadObj being assigned a value (to prevent a dangling
  // oop).
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

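// Append the snapshot to the tail of this result's singly linked list and
// bump the snapshot count.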
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::oops_do(OopClosure* f) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->oops_do(f);
  }
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

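// Capture the method, bci and class holder of the given frame and, if
// requested, the objects locked in that frame.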
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = _method->method_holder()->klass_holder();
  _locked_monitors = NULL;
  if (with_lock_info) {
    ResourceMark rm;
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(monitor->owner());
      }
    }
  }
}

void StackFrameInfo::oops_do(OopClosure* f) {
  if (_locked_monitors != NULL) {
    int length = _locked_monitors->length();
    for (int i = 0; i < length; i++) {
      f->do_oop((oop*) _locked_monitors->adr_at(i));
    }
  }
  f->do_oop(&_class_holder);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i);
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
  Thread* _thread;
public:
  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
    _thread = t;
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == _thread) {
      oop object = (oop) mid->object();
      if (!_stack_trace->is_owned_monitor_on_stack(object)) {
        _stack_trace->add_jni_locked_monitor(object);
      }
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(_thread, this);
    ObjectSynchronizer::monitors_iterate(&imc);
  }
}


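// Returns true if the given object is among the monitors locked by any frame
// of this stack trace; used by InflatedMonitorsClosure to tell JNI-locked
// monitors apart from monitors locked in Java frames.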
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<oop>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j);
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

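// Allocate a java/lang/StackTraceElement[] of length _depth and fill it with
// one element per collected frame.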
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = SystemDictionary::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::oops_do(OopClosure* f) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->oops_do(f);
  }

  length = (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0);
  for (int j = 0; j < length; j++) {
    f->do_oop((oop*) _jni_locked_monitors->adr_at(j));
  }
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;)  {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// Build a map from each JavaThread to all the AbstractOwnableSynchronizers it owns.
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

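// Record that 'thread' owns synchronizer 'o', creating a ThreadConcurrentLocks
// entry for the thread on first use.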
void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    instanceOop obj = locks->at(i);
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(o);
}

void ThreadConcurrentLocks::oops_do(OopClosure* f) {
  int length = _owned_locks->length();
  for (int i = 0; i < length; i++) {
    f->do_oop((oop*) _owned_locks->adr_at(i));
  }
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

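// Snapshot the state of the given thread: its statistics, status, and the
// contended monitor or park blocker (and that blocker's owner) at this moment.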
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  _threadObj = thread->threadObj();

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  _thread_status = java_lang_Thread::get_thread_status(_threadObj);
  _is_ext_suspended = thread->is_being_ext_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {

    Handle obj = ThreadService::get_current_contended_monitor(thread);
    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = java_lang_Thread::RUNNABLE;
    } else {
      _blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or may be in the process of being released to
        // another thread), so report this thread as RUNNABLE.
        // When the owner thread is still attaching via JNI, the Java thread
        // is not completely initialized; for example, the thread name and id
        // may not be set yet, so hide the attaching thread.
        _thread_status = java_lang_Thread::RUNNABLE;
        _blocker_object = NULL;
      } else if (owner != NULL) {
        _blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED) {
    _blocker_object = thread->current_park_blocker();
    if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      _blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object);
    }
  }
}

ThreadSnapshot::~ThreadSnapshot() {
  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::oops_do(OopClosure* f) {
  f->do_oop(&_threadObj);
  f->do_oop(&_blocker_object);
  f->do_oop(&_blocker_object_owner);
  if (_stack_trace != NULL) {
    _stack_trace->oops_do(f);
  }
  if (_concurrent_locks != NULL) {
    _concurrent_locks->oops_do(f);
  }
}

void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

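// Print one deadlock cycle: for each thread, what it is waiting to lock
// (JVM TI raw monitor, ObjectMonitor or ownable synchronizer) and which thread
// holds it, followed by the Java stack traces of all threads in the cycle.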
void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = (JavaThread*) owner;
          st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = (oop)waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                 obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                (address)waitingToLockMonitor->owner());
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                  p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Skip JavaThreads in the process of exiting
    // and also VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == NULL   ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj())   ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}