/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "code/compiledMethod.hpp"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/frame.hpp"
#include "runtime/globals.hpp"
#include "runtime/handshake.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/park.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/align.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#ifdef ZERO
# include "stack_zero.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif


class SafeThreadsListPtr;
class ThreadSafepointState;
class ThreadsList;
class ThreadsSMRSupport;

class JvmtiThreadState;
class ThreadStatistics;
class ConcurrentLocksDump;
class ParkEvent;
class Parker;
class MonitorInfo;

class ciEnv;
class CompileThread;
class CompileLog;
class CompileTask;
class CompileQueue;
class CompilerCounters;

class vframeArray;
class vframe;
class javaVFrame;

class DeoptResourceMark;
class jvmtiDeferredLocalVariableSet;

class ThreadClosure;
class ICRefillVerifier;
class IdealGraphPrinter;

class JVMCIEnv;
class JVMCIPrimitiveArray;

class Metadata;
class ResourceArea;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses eg CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//         - GangWorker
//     - WatcherThread
//     - JfrThreadSampler
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks etc)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()   // virtual per-thread-type initialization
//      - this->run()       // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread
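// Illustrative sketch only (not part of the original header): a minimal,
// hypothetical NonJavaThread subclass showing where the virtual hooks in the
// sequence above plug in. The runtime drives pre_run()/run()/post_run() from
// call_run(); a concrete subclass normally only needs to supply run() (and
// usually a name). The class name below is made up for illustration.
//
//   class DemoThread : public NonJavaThread {
//    public:
//     virtual char* name() const { return (char*)"Demo Thread"; }
//    protected:
//     virtual void run() {
//       // per-thread-type "main" logic goes here
//     }
//   };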
class Thread: public ThreadShadow {
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL_DECL Thread* _thr_current;
#endif

  // Thread local data area available to the GC. The internal
  // structure and contents of this data area is GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop       _pending_exception;                // pending exception for current thread
  // const char* _exception_file;                // file information for exception (debugging only)
  // int         _exception_line;                // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // Support for forcing alignment of thread objects for biased locking
  void*       _real_malloc_address;

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr();
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  static bool           is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList*   tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList*   untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
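  // Illustrative sketch only (not part of the original header): the three
  // helpers above implement a low-bit tag on the hazard pointer value, so one
  // word can carry both the ThreadsList* and a "tagged" marker. Assuming
  // 'list' holds an untagged ThreadsList*:
  //
  //   ThreadsList* tagged = tag_hazard_ptr(list);      // sets bit 0
  //   assert(is_hazard_ptr_tagged(tagged), "bit 0 must be set");
  //   assert(untag_hazard_ptr(tagged) == list, "round trip preserves the pointer");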
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false); }
  void  operator delete(void* p);

 protected:
  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 private:

  // ***************************************************************
  // Suspend and resume support
  // ***************************************************************
  //
  // VM suspend/resume no longer exists - it was once used for various
  // things including safepoints but was deprecated and finally removed
  // in Java 7. Because VM suspension was considered "internal", Java-level
  // suspension was considered "external", and this legacy naming scheme
  // remains.
  //
  // External suspend/resume requests come from JVM_SuspendThread,
  // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI ResumeThread.
  // External suspend requests cause _external_suspend to be set and
  // external resume requests cause _external_suspend to be cleared.
  // External suspend requests do not nest on top of other external
  // suspend requests. The higher level APIs reject suspend requests
  // for already suspended threads.
  //
  // The external_suspend flag is checked by has_special_runtime_exit_condition()
  // and the java thread will self-suspend when
  // handle_special_runtime_exit_condition() is called. Most uses of the
  // _thread_blocked state in JavaThreads are considered the same as being
  // externally suspended; if the blocking condition lifts, the JavaThread
  // will self-suspend. Other places where the VM checks for external_suspend
  // include:
  //   + mutex granting (do not enter monitors when thread is suspended)
  //   + state transitions from _thread_in_native
  //
  // In general, java_suspend() does not wait for an external suspend
  // request to complete. When it returns, the only guarantee is that
  // the _external_suspend field is true.
  //
  // wait_for_ext_suspend_completion() is used to wait for an external
  // suspend request to complete. External suspend requests are usually
  // followed by some other interface call that requires the thread to
  // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
  // the interface that requires quiescence, we give the JavaThread a
  // chance to self-suspend before we need it to be quiescent. This
  // improves overall suspend/query performance.
  //
  // _suspend_flags controls the behavior of java_ suspend/resume.
  // It must be set under the protection of SR_lock. Reading the flag is
  // OK without SR_lock as long as the value is only used as a hint
  // (e.g., check _external_suspend first without lock and then recheck
  // inside SR_lock and finish the suspension).
  //
  // _suspend_flags is also overloaded for other "special conditions" so
  // that a single check indicates whether any special action is needed
  // eg. for async exceptions.
  // -------------------------------------------------------------------
  // Notes:
  // 1. The suspend/resume logic no longer uses ThreadState in OSThread
  // but we still update its value to keep other parts of the system (mainly
  // JVMTI) happy. ThreadState is legacy code (see notes in
  // osThread.hpp).
  //
  // 2. It would be more natural if set_external_suspend() were private and
  // part of java_suspend(), but that probably would affect the suspend/query
  // performance. Need more investigation on this.

  // suspend/resume lock: used for self-suspend
  Monitor* _SR_lock;

 protected:
  enum SuspendFlags {
    // NOTE: avoid using the sign-bit as cc generates different test code
    // when the sign-bit is used, and sometimes incorrectly - see CR 6398077

    _external_suspend        = 0x20000000U, // thread is asked to self suspend
    _ext_suspended           = 0x40000000U, // thread has self-suspended

    _has_async_exception     = 0x00000001U, // there is a pending async exception
    _critical_native_unlock  = 0x00000002U, // Must call back to unlock JNI critical lock

    _trace_flag              = 0x00000004U, // call tracing backend
    _ea_obj_deopt            = 0x00000008U  // suspend for object reallocation and relocking for JVMTI agent
  };

  // various suspension related flags - atomically updated
  // overloaded for async exception checking in check_special_condition_for_native_trans.
  volatile uint32_t _suspend_flags;

 private:
  int _num_nested_signal;

  DEBUG_ONLY(bool _suspendible_thread;)

 public:
  void enter_signal_handler() { _num_nested_signal++; }
  void leave_signal_handler() { _num_nested_signal--; }
  bool is_inside_signal_handler() const { return _num_nested_signal > 0; }

  // Determines if a heap allocation failure will be retried
  // (e.g., by deoptimizing and re-executing in the interpreter).
  // In this case, the failed allocation must raise
  // Universe::out_of_memory_error_retry() and omit side effects
  // such as JVMTI events and handling -XX:+HeapDumpOnOutOfMemoryError
  // and -XX:OnOutOfMemoryError.
  virtual bool in_retryable_allocation() const { return false; }

#ifdef ASSERT
  void set_suspendible_thread() {
    _suspendible_thread = true;
  }

  void clear_suspendible_thread() {
    _suspendible_thread = false;
  }

  bool is_suspendible_thread() { return _suspendible_thread; }
#endif

 private:
  // Active_handles points to a block of handles
  JNIHandleBlock* _active_handles;

  // One-element thread local free list
  JNIHandleBlock* _free_handle_block;

  // Point to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }

 public:
  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
 private:

#ifdef ASSERT
  ICRefillVerifier* _missed_ic_stub_refill_verifier;

 public:
  ICRefillVerifier* missed_ic_stub_refill_verifier() {
    return _missed_ic_stub_refill_verifier;
  }

  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
    _missed_ic_stub_refill_verifier = verifier;
  }
#endif // ASSERT

 private:

  // Debug support for checking if code allows safepoints or not.
  // Safepoints in the VM can happen because of allocation, invoking a VM operation, blocking on
  // a mutex, or blocking on an object synchronizer (Java locking).
  // If _no_safepoint_count is non-zero, then an assertion failure will happen in any of
  // the above cases.
  //
  // The class NoSafepointVerifier is used to set this counter.
  //
  NOT_PRODUCT(int _no_safepoint_count;)         // If 0, thread allows a safepoint to happen

 private:
  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class GCLocker;
  friend class NoSafepointVerifier;
  friend class PauseNoSafepointVerifier;

  volatile void* _polling_page;                 // Thread local polling page

  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For use when sampling the memory.

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  int   _vm_operation_started_count;            // VM_Operation support
  int   _vm_operation_completed_count;          // VM_Operation support

  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
                                                // is waiting to lock
  bool _current_pending_monitor_is_from_java;   // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount;                              // length of omFreeList
  int omFreeProvision;                          // reload chunk size
  ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
  int omInUseCount;                             // length of omInUseList

#ifdef ASSERT
 private:
  volatile uint64_t _visited_for_critical_count;

 public:
  void set_visited_for_critical_count(uint64_t safepoint_id) {
    assert(_visited_for_critical_count == 0, "Must be reset before set");
    assert((safepoint_id & 0x1) == 1, "Must be odd");
    _visited_for_critical_count = safepoint_id;
  }
  void reset_visited_for_critical_count(uint64_t safepoint_id) {
    assert(_visited_for_critical_count == safepoint_id, "Was not visited");
    _visited_for_critical_count = 0;
  }
  bool was_visited_for_critical_count(uint64_t safepoint_id) const {
    return _visited_for_critical_count == safepoint_id;
  }
#endif

 public:
  enum {
    is_definitely_current_thread = true
  };

  // Constructor
  Thread();
  virtual ~Thread() = 0;        // Thread is abstract.
  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current(); // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif


 public:
  // invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();

  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_Code_cache_sweeper_thread() const  { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  // True iff the thread can perform GC operations at a safepoint.
  // Generally will be true only of VM thread and parallel GC WorkGang
  // threads.
  virtual bool is_GC_task_thread() const             { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }

  // Can this thread make Java upcalls
  virtual bool can_call_java() const                 { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const         { return false; }

  // Casts
  virtual WorkerThread* as_Worker_thread() const     { return NULL; }

  virtual char* name() const { return (char*)"Unknown thread"; }

  // Returns the current thread (ASSERTS if NULL)
  static inline Thread* current();
  // Returns the current thread, or NULL if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or NULL if not attached, and is
  // safe for use from signal-handlers
  static inline Thread* current_or_null_safe();

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static ThreadPriority get_priority(const Thread* const thread);
  static void start(Thread* thread);
  static void interrupt(Thread* thr);
  static bool is_interrupted(Thread* thr, bool clear_interrupted);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
  Monitor* SR_lock() const                       { return _SR_lock; }

  bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }

  inline void set_suspend_flag(SuspendFlags f);
  inline void clear_suspend_flag(SuspendFlags f);

  inline void set_has_async_exception();
  inline void clear_has_async_exception();

  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  inline void set_critical_native_unlock();
  inline void clear_critical_native_unlock();

  inline void set_trace_flag();
  inline void clear_trace_flag();

  inline void set_ea_obj_deopt_flag();
  inline void clear_ea_obj_deopt_flag();

  // Support for Unhandled Oop detection
  // Add the field for both fastdebug and debug builds to keep
  // Thread's fields layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif

  // Installs a pending exception to be inserted later
  static void send_async_exception(oop thread_oop, oop java_throwable);

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const         { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize();
    }
  }

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  bool is_trace_suspend()               { return (_suspend_flags & _trace_flag) != 0; }

  bool is_ea_obj_deopt_suspend()        { return (_suspend_flags & _ea_obj_deopt) != 0; }

  // VM operation support
  int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
  int vm_operation_completed_count()             { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  bool current_pending_monitor_is_from_java() {
    return _current_pending_monitor_is_from_java;
  }

  // For tracking the ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* current_waiting_monitor() {
    return _current_waiting_monitor;
  }
  void set_current_waiting_monitor(ObjectMonitor* monitor) {
    _current_waiting_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already. Returns "true" iff the
  // calling thread does the update; this indicates that the calling thread
  // has claimed the thread in the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }

  uintx threads_do_token() const { return _threads_do_token; }
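  // Illustrative sketch only (not part of the original header): a parallel
  // thread iteration typically passes a fresh claim token and only processes
  // the threads it claims, so each thread is visited exactly once per
  // iteration. The closure variable below is hypothetical:
  //
  //   if (t->claim_threads_do(true /* is_par */, claim_token)) {
  //     thread_closure->do_thread(t);   // only the claiming worker gets here
  //   }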
  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

  // Used by fast lock support
  virtual bool is_lock_owned(address adr) const;

  // Check if address is in the stack of the thread (not just for locks).
  // Warning: the method can only be used on the running thread
  bool is_in_stack(address adr) const;
  // Check if address is in the usable part of the stack (excludes protected
  // guard pages)
  bool is_in_usable_stack(address adr) const;

  // Sets this thread as the starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address          _stack_base;
  size_t           _stack_size;
  int              _lgrp_id;

  volatile void** polling_page_addr() { return &_polling_page; }

 public:
  // Stack overflow support
  address stack_base() const           { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end()  const           { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT() NOT_NMT_RETURN;

  bool    on_local_stack(address adr) const {
    // QQQ this has knowledge of direction, ought to be a stack method
    return (_stack_base >= adr && adr >= stack_end());
  }

  int     lgrp_id() const        { return _lgrp_id; }
  void    set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by the thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
  Mutex* owned_locks() const                     { return _owned_locks;          }
  bool owns_locks() const                        { return owned_locks() != NULL; }

  // Deadlock detection
  ResourceMark* current_resource_mark()          { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

  // These functions check conditions on a JavaThread before possibly going to a safepoint,
  // including NoSafepointVerifier.
  void check_for_valid_safepoint_state(bool potential_vm_operation) NOT_DEBUG_RETURN;
  void check_possible_safepoint() NOT_DEBUG_RETURN;

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line); }
  static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }

  static ByteSize polling_page_offset()          { return byte_offset_of(Thread, _polling_page); }

  static ByteSize tlab_start_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()           { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent * _ParkEvent;                     // for synchronized()
  ParkEvent * _SleepEvent;                    // for Thread.sleep
  ParkEvent * _MuxEvent;                      // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion;                    // diagnostic

  volatile int _OnTrap;                       // Resume-at IP delta
  jint _hashStateW;                           // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;                           // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;

  // Low-level leaf-lock primitives used to implement synchronization
  // and native monitor-mutex infrastructure.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);
  static void muxAcquire(volatile intptr_t * Lock, const char * Name);
  static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev);
  static void muxRelease(volatile intptr_t * Lock);
};

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != NULL, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
}

class NonJavaThread: public Thread {
  friend class VMStructs;

  NonJavaThread* volatile _next;

  class List;
  static List _the_list;

  void add_to_the_list();
  void remove_from_the_list();

 protected:
  virtual void pre_run();
  virtual void post_run();

 public:
  NonJavaThread();
  ~NonJavaThread();

  class Iterator;
};

// Provides iteration over the list of NonJavaThreads.
// List addition occurs in pre_run(), and removal occurs in post_run(),
// so that only live fully-initialized threads can be found in the list.
// Threads created after an iterator is constructed will not be visited
// by the iterator. The scope of an iterator is a critical section; there
// must be no safepoint checks in that scope.
class NonJavaThread::Iterator : public StackObj {
  uint _protect_enter;
  NonJavaThread* _current;

  // Noncopyable.
  Iterator(const Iterator&);
  Iterator& operator=(const Iterator&);

 public:
  Iterator();
  ~Iterator();

  bool end() const { return _current == NULL; }
  NonJavaThread* current() const { return _current; }
  void step();
};

// Name support for threads. non-JavaThread subclasses with multiple
// uniquely named instances should derive from this.
class NamedThread: public NonJavaThread {
  friend class VMStructs;
  enum {
    max_name_len = 64
  };
 private:
  char* _name;
  // log JavaThread being processed by oops_do
  JavaThread* _processed_thread;
  uint _gc_id; // The current GC id when a thread takes part in GC

 public:
  NamedThread();
  ~NamedThread();
  // May only be called once per thread.
  void set_name(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3);
  virtual bool is_Named_thread() const { return true; }
  virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
  JavaThread *processed_thread() { return _processed_thread; }
  void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
  virtual void print_on(outputStream* st) const;

  void set_gc_id(uint gc_id) { _gc_id = gc_id; }
  uint gc_id() { return _gc_id; }
};

// Worker threads are named and have an id of an assigned work.
class WorkerThread: public NamedThread {
 private:
  uint _id;
 public:
  WorkerThread() : _id(0)               { }
  virtual bool is_Worker_thread() const { return true; }

  virtual WorkerThread* as_Worker_thread() const {
    assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
    return (WorkerThread*) this;
  }

  void set_id(uint work_id)             { _id = work_id; }
  uint id() const                       { return _id; }
};

// A single WatcherThread is used for simulating timer interrupts.
class WatcherThread: public NonJavaThread {
  friend class VMStructs;
 protected:
  virtual void run();

 private:
  static WatcherThread* _watcher_thread;

  static bool _startable;
  // volatile due to at least one lock-free read
  volatile static bool _should_terminate;
 public:
  enum SomeConstants {
    delay_interval = 10                          // interrupt delay in milliseconds
  };

  // Constructor
  WatcherThread();

  // No destruction allowed
  ~WatcherThread() {
    guarantee(false, "WatcherThread deletion must fix the race with VM termination");
  }

  // Tester
  bool is_Watcher_thread() const                 { return true; }

  // Printing
  char* name() const { return (char*)"VM Periodic Task Thread"; }
  void print_on(outputStream* st) const;
  void unpark();

  // Returns the single instance of WatcherThread
  static WatcherThread* watcher_thread()         { return _watcher_thread; }

  // Create and start the single instance of WatcherThread, or stop it on shutdown
  static void start();
  static void stop();
  // Only allow start once the VM is sufficiently initialized
  // Otherwise the first task to enroll will trigger the start
  static void make_startable();
 private:
  int sleep() const;
};


class CompilerThread;

typedef void (*ThreadFunction)(JavaThread*, TRAPS);

// Holds updates for compiled frames by JVMTI agents that cannot be performed immediately.
class JvmtiDeferredUpdates : public CHeapObj<mtCompiler> {

  // Relocking has to be deferred, if the lock owning thread is currently waiting on the monitor.
  int _relock_count_after_wait;

  // Deferred updates of locals, expressions and monitors
  GrowableArray<jvmtiDeferredLocalVariableSet*> _deferred_locals_updates;

 public:
  JvmtiDeferredUpdates() :
    _relock_count_after_wait(0),
    _deferred_locals_updates((ResourceObj::set_allocation_type((address) &_deferred_locals_updates,
                              ResourceObj::C_HEAP), 1), true, mtCompiler) { }

  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() { return &_deferred_locals_updates; }

  int get_and_reset_relock_count_after_wait() {
    int result = _relock_count_after_wait;
    _relock_count_after_wait = 0;
    return result;
  }
  void inc_relock_count_after_wait() {
    _relock_count_after_wait++;
  }
};


class JavaThread: public Thread {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class WhiteBox;
 private:
  bool           _on_thread_list;                // Is set when this JavaThread is added to the Threads list
  oop            _threadObj;                     // The Java level thread object

#ifdef ASSERT
 private:
  int _java_call_counter;

 public:
  int  java_call_counter()                       { return _java_call_counter; }
  void inc_java_call_counter()                   { _java_call_counter++; }
  void dec_java_call_counter() {
    assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
    _java_call_counter--;
  }
 private:  // restore original namespace restriction
#endif  // ifdef ASSERT

#ifndef PRODUCT
 public:
  enum {
    jump_ring_buffer_size = 16
  };
 private:  // restore original namespace restriction
#endif

  JavaFrameAnchor _anchor;                       // Encapsulation of current java frame and its state

  ThreadFunction _entry_point;

  JNIEnv        _jni_environment;

  // Deopt support
  DeoptResourceMark*  _deopt_mark;               // Holds special ResourceMark for deoptimization

  CompiledMethod*       _deopt_nmethod;          // CompiledMethod that is currently being deoptimized
  vframeArray*  _vframe_array_head;              // Holds the heap of the active vframeArrays
  vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
  // Holds updates by JVMTI agents for compiled frames that cannot be performed immediately. They
  // will be carried out as soon as possible, which, in most cases, is just before deoptimization of
  // the frame, when control returns to it.
  JvmtiDeferredUpdates* _jvmti_deferred_updates;

  // Handshake value for fixing 6243940. We need a place for the i2c
  // adapter to store the callee Method*. This value is NEVER live
  // across a gc point so it does NOT have to be gc'd
  // The handshake is open ended since we can't be certain that it will
  // be NULLed. This is because we rarely ever see the race and end up
  // in handle_wrong_method which is the backend of the handshake. See
  // code in i2c adapters and handle_wrong_method.

  Method*       _callee_target;

  // Used to pass back results to the interpreter or generated code running Java code.
  oop           _vm_result;    // oop result is GC-preserved
  Metadata*     _vm_result_2;  // non-oop result

  // See ReduceInitialCardMarks: this holds the precise space interval of
  // the most recent slow path allocation for which compiled code has
  // elided card-marks for performance along the fast-path.
  MemRegion     _deferred_card_mark;

  MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors
                                                 // allocated during deoptimization
                                                 // and by JNI_MonitorEnter/Exit

  // Async. requests support
  enum AsyncRequests {
    _no_async_condition = 0,
    _async_exception,
    _async_unsafe_access_error
  };
  AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
  oop           _pending_async_exception;

  // Safepoint support
 public:                                         // Expose _thread_state for SafeFetchInt()
  volatile JavaThreadState _thread_state;
 private:
  ThreadSafepointState* _safepoint_state;        // Holds information about a thread during a safepoint
  address               _saved_exception_pc;     // Saved pc of instruction where last implicit exception happened

  // JavaThread termination support
  enum TerminatedTypes {
    _not_terminated = 0xDEAD - 2,
    _thread_exiting,                             // JavaThread::exit() has been called for this thread
    _thread_terminated,                          // JavaThread is removed from thread list
    _vm_exited                                   // JavaThread is still executing native code, but VM is terminated
                                                 // only VM_Exit can set _vm_exited
  };

  // In general a JavaThread's _terminated field transitions as follows:
  //
  //   _not_terminated => _thread_exiting => _thread_terminated
  //
  // _vm_exited is a special value to cover the case of a JavaThread
  // executing native code after the VM itself is terminated.
  volatile TerminatedTypes _terminated;
  // suspend/resume support
  volatile bool         _suspend_equivalent;     // Suspend equivalent condition
  jint                  _in_deopt_handler;       // count of deoptimization
                                                 // handlers thread is in
  volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
  bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
                                                         // never locked) when throwing an exception. Used by interpreter only.

  // JNI attach states:
  enum JNIAttachStates {
    _not_attaching_via_jni = 1,  // thread is not attaching via JNI
    _attaching_via_jni,          // thread is attaching via JNI
    _attached_via_jni            // thread has attached via JNI
  };

  // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
  // A native thread that is attaching via JNI starts with a value
  // of _attaching_via_jni and transitions to _attached_via_jni.
  volatile JNIAttachStates _jni_attach_state;

 public:
  // State of the stack guard pages for this thread.
  enum StackGuardState {
    stack_guard_unused,                   // not needed
    stack_guard_reserved_disabled,
    stack_guard_yellow_reserved_disabled, // disabled (temporarily) after stack overflow
    stack_guard_enabled                   // enabled
  };

 private:

#if INCLUDE_JVMCI
  // The _pending_* fields below are used to communicate extra information
  // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.

  // Communicates the DeoptReason and DeoptAction of the uncommon trap
  int _pending_deoptimization;

  // Specifies whether the uncommon trap is to bci 0 of a synchronized method
  // before the monitor has been acquired.
  bool _pending_monitorenter;

  // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
  bool _pending_transfer_to_interpreter;

  // True if in a runtime call from compiled code that will deoptimize
  // and re-execute a failed heap allocation in the interpreter.
  bool _in_retryable_allocation;

  // An id of a speculation that JVMCI compiled code can use to further describe and
  // uniquely identify the speculative optimization guarded by the uncommon trap
  jlong _pending_failed_speculation;

  // These fields are mutually exclusive in terms of live ranges.
  union {
    // Communicates the pc at which the most recent implicit exception occurred
    // from the signal handler to a deoptimization stub.
    address _implicit_exception_pc;

    // Communicates an alternative call target to an i2c stub from a JavaCall.
    address _alternate_call_target;
  } _jvmci;

  // Support for high precision, thread sensitive counters in JVMCI compiled code.
  jlong* _jvmci_counters;

 public:
  static jlong* _jvmci_old_thread_counters;
  static void collect_counters(jlong* array, int length);

  bool resize_counters(int current_size, int new_size);

  static bool resize_all_jvmci_counters(int new_size);

 private:
#endif // INCLUDE_JVMCI

  StackGuardState  _stack_guard_state;

  // Precompute the limit of the stack as used in stack overflow checks.
  // We load it from here to simplify the stack overflow check in assembly.
  address          _stack_overflow_limit;
  address          _reserved_stack_activation;

  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
  // code)
  volatile oop     _exception_oop;               // Exception thrown in compiled code
  volatile address _exception_pc;                // PC where exception happened
  volatile address _exception_handler_pc;        // PC for handler of exception
  volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.

 private:
  // support for JNI critical regions
  jint    _jni_active_critical;                  // count of entries into JNI critical region

  // Checked JNI: function name requires exception check
  char* _pending_jni_exception_check_fn;

  // For deadlock detection.
  int _depth_first_number;

  // JVMTI PopFrame support
  // This is set to popframe_pending to signal that top Java frame should be popped immediately
  int _popframe_condition;

  // If reallocation of scalar replaced objects fails, we throw OOM
  // and during exception propagation, pop the top
  // _frames_to_pop_failed_realloc frames, the ones that reference
  // failed reallocations.
  int _frames_to_pop_failed_realloc;

  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize();                             // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif // ASSERT

  // JNI function table getter/setter for JVMTI jni function table interception API.
  void set_jni_functions(struct JNINativeInterface_* functionTable) {
    _jni_environment.functions = functionTable;
  }
  struct JNINativeInterface_* get_jni_functions() {
    return (struct JNINativeInterface_ *)_jni_environment.functions;
  }

  // This function is called at thread creation to allow
  // platform specific thread variables to be initialized.
  void cache_global_variables();

  // Executes Shutdown.shutdown()
  void invoke_shutdown_hooks();

  // Cleanup on thread exit
  enum ExitType {
    normal_exit,
    jni_detach
  };
  void exit(bool destroy_vm, ExitType exit_type = normal_exit);

  void cleanup_failed_attach_current_thread(bool is_daemon);

  // Testers
  virtual bool is_Java_thread() const            { return true;  }
  virtual bool can_call_java() const             { return true; }

  virtual bool is_active_Java_thread() const {
    return on_thread_list() && !is_terminated();
  }

  // Thread oop. threadObj() can be NULL for initial JavaThread
  // (or for threads attached via JNI)
  oop threadObj() const                          { return _threadObj; }
  void set_threadObj(oop p)                      { _threadObj = p; }

  // Prepare thread and add to priority queue. If a priority is
  // not specified, use the priority of the thread object. Threads_lock
  // must be held while this function is called.
  void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);

  void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
  address saved_exception_pc()                   { return _saved_exception_pc; }


  ThreadFunction entry_point() const             { return _entry_point; }

  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
  void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS);

  // Last frame anchor routines

  JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }

  // last_Java_sp
  bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
  intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }

  // last_Java_pc

  address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }

  // Safepoint support
  inline JavaThreadState thread_state() const;
  inline void set_thread_state(JavaThreadState s);
  inline void set_thread_state_fence(JavaThreadState s);  // fence after setting thread state
  inline ThreadSafepointState* safepoint_state() const;
  inline void set_safepoint_state(ThreadSafepointState* state);
  inline bool is_at_poll_safepoint();

  // JavaThread termination and lifecycle support:
  void smr_delete();
  bool on_thread_list() const { return _on_thread_list; }
  void set_on_thread_list() { _on_thread_list = true; }

  // thread has called JavaThread::exit() or is terminated
  bool is_exiting() const;
  // thread is terminated (no longer on the threads list); we compare
  // against the two non-terminated values so that a freed JavaThread
  // will also be considered terminated.
  bool check_is_terminated(TerminatedTypes l_terminated) const {
    return l_terminated != _not_terminated && l_terminated != _thread_exiting;
  }
  bool is_terminated() const;
  void set_terminated(TerminatedTypes t);
  // special for Threads::remove() which is static:
  void set_terminated_value();
  void block_if_vm_exited();

  bool doing_unsafe_access()                     { return _doing_unsafe_access; }
  void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }

  bool do_not_unlock_if_synchronized()           { return _do_not_unlock_if_synchronized; }
  void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }

  inline void set_polling_page_release(void* poll_value);
  inline void set_polling_page(void* poll_value);
  inline volatile void* get_polling_page();

 private:
  // Support for thread handshake operations
  HandshakeState _handshake;
 public:
  void set_handshake_operation(HandshakeOperation* op) {
    _handshake.set_operation(this, op);
  }

  bool has_handshake() const {
    return _handshake.has_operation();
  }

  void handshake_process_by_self() {
    _handshake.process_by_self(this);
  }

  void handshake_process_by_vmthread() {
    _handshake.process_by_vmthread(this);
  }

  // Suspend/resume support for JavaThread
 private:
  inline void set_ext_suspended();
  inline void clear_ext_suspended();

  // Synchronize with another thread (most likely a JVMTI agent) that is deoptimizing objects of the
  // current thread, i.e. reverts optimizations based on escape analysis.
  void wait_for_object_deoptimization();

 public:
  void java_suspend(); // higher-level suspension logic called by the public APIs
  void java_resume();  // higher-level resume logic called by the public APIs
  int  java_suspend_self(); // low-level self-suspension mechanics

 private:
  // mid-level wrapper around java_suspend_self to set up correct state and
  // check for a pending safepoint at the end
  void java_suspend_self_with_safepoint_check();

 public:
  void check_and_wait_while_suspended() {
    assert(JavaThread::current() == this, "sanity check");

    bool do_self_suspend;
    do {
      // were we externally suspended while we were waiting?
      do_self_suspend = handle_special_suspend_equivalent_condition();
      if (do_self_suspend) {
        // don't surprise the thread that suspended us by returning
        java_suspend_self();
        set_suspend_equivalent();
      }
    } while (do_self_suspend);
  }
  static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
  // Check for async exception in addition to safepoint and suspend request.
  static void check_special_condition_for_native_trans(JavaThread *thread);

  // Same as check_special_condition_for_native_trans but finishes the
  // transition into thread_in_Java mode so that it can potentially
  // block.
  static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);

  bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
  bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    // Warning: is_ext_suspend_completed() may temporarily drop the
    // SR_lock to allow the thread to reach a stable thread state if
    // it is currently in a transient thread state.
    return is_ext_suspend_completed(false /* !called_by_wait */,
                                    SuspendRetryDelay, bits);
  }

  // We cannot allow wait_for_ext_suspend_completion() to run forever or
  // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
  // passed as the count and delay parameters. Experiments with specific
  // calls to wait_for_ext_suspend_completion() can be done by passing
  // other values in the code. Experiments with all calls can be done
  // via the appropriate -XX options.
  bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);

  // test for suspend - most (all?) of these should go away
  bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);

  inline void set_external_suspend();
  inline void clear_external_suspend();

  bool is_external_suspend() const {
    return (_suspend_flags & _external_suspend) != 0;
  }
  // Whenever a thread transitions from native to vm/java it must suspend
  // if external|deopt suspend is present.
  bool is_suspend_after_native() const {
    return (_suspend_flags & (_external_suspend | _ea_obj_deopt JFR_ONLY(| _trace_flag))) != 0;
  }

  // external suspend request is completed
  bool is_ext_suspended() const {
    return (_suspend_flags & _ext_suspended) != 0;
  }

  bool is_external_suspend_with_lock() const {
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_external_suspend();
  }

  // Special method to handle a pending external suspend request
  // when a suspend equivalent condition lifts.
  bool handle_special_suspend_equivalent_condition() {
    assert(is_suspend_equivalent(),
           "should only be called in a suspend equivalence condition");
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    bool ret = is_external_suspend();
    if (!ret) {
      // not about to self-suspend so clear suspend equivalence
      clear_suspend_equivalent();
    }
    // implied else:
    // We have a pending external suspend request so we leave the
    // suspend_equivalent flag set until java_suspend_self() sets
    // the ext_suspended flag and clears the suspend_equivalent
    // flag. This ensures that wait_for_ext_suspend_completion()
    // will return consistent values.
    return ret;
  }

  // utility methods to see if we are doing some kind of suspension
  bool is_being_ext_suspended() const {
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_ext_suspended() || is_external_suspend();
  }

  bool is_suspend_equivalent() const             { return _suspend_equivalent; }

  void set_suspend_equivalent()                  { _suspend_equivalent = true; }
  void clear_suspend_equivalent()                { _suspend_equivalent = false; }

  // Thread.stop support
  void send_thread_stop(oop throwable);
  AsyncRequests clear_special_runtime_exit_condition() {
    AsyncRequests x = _special_runtime_exit_condition;
    _special_runtime_exit_condition = _no_async_condition;
    return x;
  }

  // Are any async conditions present?
  bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }

  void check_and_handle_async_exceptions(bool check_unsafe_error = true);

  // these next two are also used for self-suspension and async exception support
  void handle_special_runtime_exit_condition(bool check_asyncs = true);

  // Return true if JavaThread has an asynchronous condition or
  // if external suspension is requested.
  bool has_special_runtime_exit_condition() {
    // Because we don't use is_external_suspend_with_lock
    // it is possible that we won't see an asynchronous external suspend
    // request that has just gotten started, i.e., SR_lock grabbed but
    // _external_suspend field change either not made yet or not visible
    // yet. However, this is okay because the request is asynchronous and
    // we will see the new flag value the next time through. It's also
    // possible that the external suspend request is dropped after
    // we have checked is_external_suspend(); we will recheck its value
    // under SR_lock in java_suspend_self().
    return (_special_runtime_exit_condition != _no_async_condition) ||
            is_external_suspend() || is_trace_suspend() || is_ea_obj_deopt_suspend();
  }

  void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }

  inline void set_pending_async_exception(oop e);

  // Fast-locking support
  bool is_lock_owned(address adr) const;

  // Accessors for vframe array top
  // The linked list of vframe arrays is sorted on sp. This means when we
  // unpack, the head must contain the vframe array to unpack.
  void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
  vframeArray* vframe_array_head() const         { return _vframe_array_head; }

  // Side structure for deferring update of java frame locals until deopt occurs
  JvmtiDeferredUpdates* deferred_updates() const { return _jvmti_deferred_updates; }
  void reset_deferred_updates()                  { _jvmti_deferred_updates = NULL; }
  void allocate_deferred_updates() {
    assert(_jvmti_deferred_updates == NULL, "already allocated");
    _jvmti_deferred_updates = new JvmtiDeferredUpdates();
  }
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _jvmti_deferred_updates == NULL ? NULL : _jvmti_deferred_updates->deferred_locals(); }

  // Relocking has to be deferred if the lock-owning thread is currently waiting on the monitor.
  int get_and_reset_relock_count_after_wait() {
    return deferred_updates() == NULL ? 0 : deferred_updates()->get_and_reset_relock_count_after_wait();
  }
  void inc_relock_count_after_wait() {
    if (deferred_updates() == NULL) {
      allocate_deferred_updates();
    }
    deferred_updates()->inc_relock_count_after_wait();
  }

  // These only really exist to make debugging deopt problems simpler

  void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
  vframeArray* vframe_array_last() const         { return _vframe_array_last; }

  // The special resourceMark used during deoptimization

  void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
  DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }

  void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; }
  CompiledMethod* deopt_compiled_method()            { return _deopt_nmethod; }

  Method* callee_target() const                  { return _callee_target; }
  void set_callee_target (Method* x)             { _callee_target = x; }

  // Oop results of vm runtime calls
  oop  vm_result() const                         { return _vm_result; }
  void set_vm_result (oop x)                     { _vm_result = x; }

  Metadata* vm_result_2() const                  { return _vm_result_2; }
  void set_vm_result_2 (Metadata* x)             { _vm_result_2 = x; }

  MemRegion deferred_card_mark() const           { return _deferred_card_mark; }
  void set_deferred_card_mark(MemRegion mr)      { _deferred_card_mark = mr; }

#if INCLUDE_JVMCI
  int  pending_deoptimization() const             { return _pending_deoptimization; }
  jlong pending_failed_speculation() const        { return _pending_failed_speculation; }
  bool has_pending_monitorenter() const           { return _pending_monitorenter; }
  void set_pending_monitorenter(bool b)           { _pending_monitorenter = b; }
  void set_pending_deoptimization(int reason)     { _pending_deoptimization = reason; }
  void set_pending_failed_speculation(jlong failed_speculation) { _pending_failed_speculation = failed_speculation; }
  void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; }
  void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; }
  void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; }

  virtual bool in_retryable_allocation() const    { return _in_retryable_allocation; }
  void set_in_retryable_allocation(bool b)        { _in_retryable_allocation = b; }
#endif // INCLUDE_JVMCI
  // Exception handling for compiled methods
  oop      exception_oop() const                 { return _exception_oop; }
  address  exception_pc() const                  { return _exception_pc; }
  address  exception_handler_pc() const          { return _exception_handler_pc; }
  bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }

  void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
  void set_exception_pc(address a)               { _exception_pc = a; }
  void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }

  void clear_exception_oop_and_pc() {
    set_exception_oop(NULL);
    set_exception_pc(NULL);
  }

  // Stack overflow support
  //
  //  (small addresses)
  //
  //  --  <-- stack_end()                   ---
  //  |                                      |
  //  |  red pages                           |
  //  |                                      |
  //  --  <-- stack_red_zone_base()          |
  //  |                                      |
  //  |                                     guard
  //  |  yellow pages                       zone
  //  |                                      |
  //  |                                      |
  //  --  <-- stack_yellow_zone_base()       |
  //  |                                      |
  //  |                                      |
  //  |  reserved pages                      |
  //  |                                      |
  //  --  <-- stack_reserved_zone_base()    ---       ---
  //                                                  /|\  shadow  <--  stack_overflow_limit() (somewhere in here)
  //                                                   |   zone
  //                                                  \|/  size
  //  some untouched memory                           ---
  //
  //
  //  --
  //  |
  //  |  shadow zone
  //  |
  //  --
  //  x    frame n
  //  --
  //  x    frame n-1
  //  x
  //  --
  //  ...
  //
  //  --
  //  x    frame 0
  //  --  <-- stack_base()
  //
  //  (large addresses)
  //

 private:
  // These values are derived from flags StackRedPages, StackYellowPages,
  // StackReservedPages and StackShadowPages. The zone size is determined
  // ergonomically if page_size > 4K.
  static size_t _stack_red_zone_size;
  static size_t _stack_yellow_zone_size;
  static size_t _stack_reserved_zone_size;
  static size_t _stack_shadow_zone_size;
 public:
  inline size_t stack_available(address cur_sp);

  static size_t stack_red_zone_size() {
    assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_red_zone_size;
  }
  static void set_stack_red_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We can not protect if the red zone size is not page aligned.");
    assert(_stack_red_zone_size == 0, "This should be called only once.");
    _stack_red_zone_size = s;
  }
  address stack_red_zone_base() {
    return (address)(stack_end() + stack_red_zone_size());
  }
  bool in_stack_red_zone(address a) {
    return a <= stack_red_zone_base() && a >= stack_end();
  }

  static size_t stack_yellow_zone_size() {
    assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_yellow_zone_size;
  }
  static void set_stack_yellow_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We can not protect if the yellow zone size is not page aligned.");
    assert(_stack_yellow_zone_size == 0, "This should be called only once.");
    _stack_yellow_zone_size = s;
  }

  static size_t stack_reserved_zone_size() {
    // _stack_reserved_zone_size may be 0. This indicates the feature is off.
    return _stack_reserved_zone_size;
  }
  static void set_stack_reserved_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We can not protect if the reserved zone size is not page aligned.");
    assert(_stack_reserved_zone_size == 0, "This should be called only once.");
    _stack_reserved_zone_size = s;
  }
  address stack_reserved_zone_base() {
    return (address)(stack_end() +
                     (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
  }
  bool in_stack_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) &&
           (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
  }

  static size_t stack_yellow_reserved_zone_size() {
    return _stack_yellow_zone_size + _stack_reserved_zone_size;
  }
  bool in_stack_yellow_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
  }

  // Size of red + yellow + reserved zones.
  static size_t stack_guard_zone_size() {
    return stack_red_zone_size() + stack_yellow_reserved_zone_size();
  }

  static size_t stack_shadow_zone_size() {
    assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_shadow_zone_size;
  }
  static void set_stack_shadow_zone_size(size_t s) {
    // The shadow area is not allocated or protected, so
    // it need not be page aligned.
    // But the stack bang currently assumes that it is a
    // multiple of page size. This guarantees that the bang
    // loop touches all pages in the shadow zone.
    // This can be guaranteed differently, as well. E.g., if
    // the page size is a multiple of 4K, banging in 4K steps
    // suffices to touch all pages. (Some pages are banged
    // several times, though.)
    assert(is_aligned(s, os::vm_page_size()),
           "Stack bang assumes multiple of page size.");
    assert(_stack_shadow_zone_size == 0, "This should be called only once.");
    _stack_shadow_zone_size = s;
  }

  void create_stack_guard_pages();
  void remove_stack_guard_pages();

  void enable_stack_reserved_zone();
  void disable_stack_reserved_zone();
  void enable_stack_yellow_reserved_zone();
  void disable_stack_yellow_reserved_zone();
  void enable_stack_red_zone();
  void disable_stack_red_zone();

  inline bool stack_guard_zone_unused();
  inline bool stack_yellow_reserved_zone_disabled();
  inline bool stack_reserved_zone_disabled();
  inline bool stack_guards_enabled();

  address reserved_stack_activation() const { return _reserved_stack_activation; }
  void set_reserved_stack_activation(address addr) {
    assert(_reserved_stack_activation == stack_base()
            || _reserved_stack_activation == NULL
            || addr == stack_base(), "Must not be set twice");
    _reserved_stack_activation = addr;
  }
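  // Layout sketch (illustrative; 'jt' and 'fault_addr' are made-up names and
  // the surrounding overflow handler is an assumption): walking up from
  // stack_end(), each zone base is a cumulative sum of the zone sizes, so a
  // faulting address can be classified with the in_stack_*_zone() predicates.
  //
  //   address base = jt->stack_end();
  //   // red zone sits directly above stack_end(); the reserved zone tops the guard area
  //   assert(jt->stack_red_zone_base()      == base + JavaThread::stack_red_zone_size(),   "red zone");
  //   assert(jt->stack_reserved_zone_base() == base + JavaThread::stack_guard_zone_size(), "guard top");
  //   if (jt->in_stack_yellow_reserved_zone(fault_addr)) {
  //     // the overflow handler may disable the yellow/reserved pages and unwind
  //   }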
  // Attempt to reguard the stack after a stack overflow may have occurred.
  // Returns true if (a) guard pages are not needed on this thread, (b) the
  // pages are already guarded, or (c) the pages were successfully reguarded.
  // Returns false if there is not enough stack space to reguard the pages, in
  // which case the caller should unwind a frame and try again. The argument
  // should be the caller's (approximate) sp.
  bool reguard_stack(address cur_sp);
  // Similar to the above, but checks whether the current stack pointer is out
  // of the guard area and reguards if possible.
  bool reguard_stack(void);

  address stack_overflow_limit() { return _stack_overflow_limit; }
  void set_stack_overflow_limit() {
    _stack_overflow_limit =
      stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
  }

  // Misc. accessors/mutators
  void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
  void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
  bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }

  // For assembly stub generation
  static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
  static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
  static ByteSize pending_jni_exception_check_fn_offset() {
    return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
  }
  static ByteSize last_Java_sp_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  }
  static ByteSize last_Java_pc_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
  }
  static ByteSize frame_anchor_offset() {
    return byte_offset_of(JavaThread, _anchor);
  }
  static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target); }
  static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result); }
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
#if INCLUDE_JVMCI
  static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
  static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
  static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
  static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
  static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
  static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
#endif // INCLUDE_JVMCI
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }
  static ByteSize doing_unsafe_access_offset()   { return byte_offset_of(JavaThread, _doing_unsafe_access); }

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }

  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
      thread_from_jni_env->block_if_vm_exited();
      return NULL;
    } else {
      return thread_from_jni_env;
    }
  }

  // JNI critical regions. These can nest.
  bool in_critical()       { return _jni_active_critical > 0; }
  bool in_last_critical()  { return _jni_active_critical == 1; }
  inline void enter_critical();
  void exit_critical() {
    assert(Thread::current() == this, "this must be current thread");
    _jni_active_critical--;
    assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
  }
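  // Usage sketch (illustrative only): the JNIEnv handed to native code is the
  // _jni_environment field embedded in its JavaThread, so the owning thread can
  // be recovered by subtracting the field offset; critical regions simply bump
  // the nesting counter around GetPrimitiveArrayCritical-style accesses.
  //
  //   JavaThread* jt = JavaThread::thread_from_jni_environment(env);
  //   jt->enter_critical();
  //   // ... touch the array contents with no safepoint in between ...
  //   jt->exit_critical();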
  // Checked JNI: is the programmer required to check for exceptions? If so,
  // record which function name triggered the check. Returning to a Java frame
  // should implicitly clear the pending check; this is done for Native->Java
  // transitions (i.e. user JNI code). VM->Java transitions are not cleared;
  // it is expected that JNI code enclosed within ThreadToNativeFromVM makes
  // proper exception checks (i.e. VM internal).
  bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
  void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
  const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
  void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }

  // For deadlock detection
  int depth_first_number() { return _depth_first_number; }
  void set_depth_first_number(int dfn) { _depth_first_number = dfn; }

 private:
  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }

 public:
  MonitorChunk* monitor_chunks() const { return _monitor_chunks; }
  void add_monitor_chunk(MonitorChunk* chunk);
  void remove_monitor_chunk(MonitorChunk* chunk);
  bool in_deopt_handler() const { return _in_deopt_handler > 0; }
  void inc_in_deopt_handler() { _in_deopt_handler++; }
  void dec_in_deopt_handler() {
    assert(_in_deopt_handler > 0, "mismatched deopt nesting");
    if (_in_deopt_handler > 0) { // robustness
      _in_deopt_handler--;
    }
  }

 private:
  void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }

 public:

  // Frame iteration; calls the function f for all frames on the stack
  void frames_do(void f(frame*, const RegisterMap*));

  // Memory operations
  void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Sweeper operations
  virtual void nmethods_do(CodeBlobClosure* cf);

  // RedefineClasses Support
  void metadata_do(MetadataClosure* f);

  // Debug method asserting thread states are correct during a handshake operation.
  DEBUG_ONLY(void verify_states_for_handshake();)
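  // Iteration sketch (illustrative; 'print_frame' is a made-up callback, not
  // part of this header): frames_do() above walks every frame on this thread's
  // stack and hands each one to a plain function pointer.
  //
  //   static void print_frame(frame* fr, const RegisterMap* map) {
  //     fr->print_on(tty);
  //   }
  //   ...
  //   jt->frames_do(print_frame);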
  // Misc. operations
  char* name() const { return (char*)get_thread_name(); }
  void print_on(outputStream* st, bool print_extended_info) const;
  void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  void print_thread_state_on(outputStream*) const PRODUCT_RETURN;
  void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_name_on_error(outputStream* st, char* buf, int buflen) const;
  void verify();
  const char* get_thread_name() const;
 protected:
  // factor out low-level mechanics for use in both normal and error cases
  virtual const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
 public:
  // Accessing frames
  frame last_frame() {
    _anchor.make_walkable(this);
    return pd_last_frame();
  }
  javaVFrame* last_java_vframe(RegisterMap* reg_map);

  // Returns method at 'depth' java or native frames down the stack
  // Used for security checks
  Klass* security_get_caller_class(int depth);

  // Print stack trace in external format
  void print_stack_on(outputStream* st);
  void print_stack() { print_stack_on(tty); }

  // Print stack traces in various internal formats
  void trace_stack()                             PRODUCT_RETURN;
  void trace_stack_from(vframe* start_vf)        PRODUCT_RETURN;
  void trace_frames()                            PRODUCT_RETURN;

  // Print an annotated view of the stack frames
  void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
  void validate_frame_layout() {
    print_frame_layout(0, true);
  }

  // Functions for testing deoptimization
  void deoptimize();
  void make_zombies();

  void deoptimized_wrt_marked_nmethods();

 public:
  // Returns the running thread as a JavaThread
  static inline JavaThread* current();

  // Returns the active Java thread. Do not use this if you know you are calling
  // from a JavaThread, as it's slower than JavaThread::current. If called from
  // the VMThread, it also returns the JavaThread that instigated the VMThread's
  // operation. You may not want that either.
  static JavaThread* active();

  inline CompilerThread* as_CompilerThread();

 protected:
  virtual void pre_run();
  virtual void run();
  void thread_main_inner();
  virtual void post_run();


 private:
  GrowableArray<oop>* _array_for_gc;
 public:

  void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }

 public:
  // Thread local information maintained by JVMTI.
  void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; }
  // A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
  // getter is used to get this JavaThread's JvmtiThreadState if it has
  // one, which means NULL can be returned. JvmtiThreadState::state_for()
  // is used to get the specified JavaThread's JvmtiThreadState if it has
  // one, or it allocates a new JvmtiThreadState for the JavaThread and
  // returns it. JvmtiThreadState::state_for() will return NULL only if
  // the specified JavaThread is exiting.
  JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; }
  static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); }

  // JVMTI PopFrame support
  // Setting and clearing popframe_condition
  // All of these enumerated values are bits. popframe_pending
  // indicates that a PopFrame() has been requested and not yet been
  // completed. popframe_processing indicates that the PopFrame() is in
  // the process of being completed. popframe_force_deopt_reexecution_bit
  // indicates that special handling is required when returning to a
  // deoptimized caller.
  enum PopCondition {
    popframe_inactive                      = 0x00,
    popframe_pending_bit                   = 0x01,
    popframe_processing_bit                = 0x02,
    popframe_force_deopt_reexecution_bit   = 0x04
  };
  PopCondition popframe_condition()                   { return (PopCondition) _popframe_condition; }
  void set_popframe_condition(PopCondition c)         { _popframe_condition = c; }
  void set_popframe_condition_bit(PopCondition c)     { _popframe_condition |= c; }
  void clear_popframe_condition()                     { _popframe_condition = popframe_inactive; }
  static ByteSize popframe_condition_offset()         { return byte_offset_of(JavaThread, _popframe_condition); }
  bool has_pending_popframe()                         { return (popframe_condition() & popframe_pending_bit) != 0; }
  bool popframe_forcing_deopt_reexecution()           { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
  void clear_popframe_forcing_deopt_reexecution()     { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; }
#ifdef CC_INTERP
  bool pop_frame_pending(void)                        { return ((_popframe_condition & popframe_pending_bit) != 0); }
  void clr_pop_frame_pending(void)                    { _popframe_condition = popframe_inactive; }
  bool pop_frame_in_process(void)                     { return ((_popframe_condition & popframe_processing_bit) != 0); }
  void set_pop_frame_in_process(void)                 { _popframe_condition |= popframe_processing_bit; }
  void clr_pop_frame_in_process(void)                 { _popframe_condition &= ~popframe_processing_bit; }
#endif

  int frames_to_pop_failed_realloc() const            { return _frames_to_pop_failed_realloc; }
  void set_frames_to_pop_failed_realloc(int nb)       { _frames_to_pop_failed_realloc = nb; }
  void dec_frames_to_pop_failed_realloc()             { _frames_to_pop_failed_realloc--; }

 private:
  // Saved incoming arguments to popped frame.
  // Used only when popped interpreted frame returns to deoptimized frame.
  void*    _popframe_preserved_args;
  int      _popframe_preserved_args_size;

 public:
  void  popframe_preserve_args(ByteSize size_in_bytes, void* start);
  void* popframe_preserved_args();
  ByteSize popframe_preserved_args_size();
  WordSize popframe_preserved_args_size_in_words();
  void  popframe_free_preserved_args();
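  // Flow sketch (illustrative, not a verbatim excerpt from the JVMTI
  // implementation): the agent-facing PopFrame code requests the pop by
  // setting the pending bit, and the interpreter notices it later at a safe
  // point in the target thread.
  //
  //   target->set_popframe_condition_bit(JavaThread::popframe_pending_bit);
  //   ...
  //   if (target->has_pending_popframe()) {
  //     // preserve incoming arguments, pop the frame, then
  //     target->clear_popframe_condition();
  //   }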
 private:
  JvmtiThreadState *_jvmti_thread_state;

  // Used by the interpreter in fullspeed mode for frame pop, method
  // entry, method exit and single stepping support. This field is
  // only set to non-zero by the VM_EnterInterpOnlyMode VM operation.
  // It can be set to zero asynchronously (i.e., without a VM operation
  // or a lock) so we have to be very careful.
  int _interp_only_mode;

 public:
  // used by the interpreter for fullspeed debugging support (see above)
  static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
  bool is_interp_only_mode()                { return (_interp_only_mode != 0); }
  int get_interp_only_mode()                { return _interp_only_mode; }
  void increment_interp_only_mode()         { ++_interp_only_mode; }
  void decrement_interp_only_mode()         { --_interp_only_mode; }

  // support for cached flag that indicates whether exceptions need to be posted for this thread
  // if this is false, we can avoid deoptimizing when events are thrown
  // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything
 private:
  int _should_post_on_exceptions_flag;

 public:
  int  should_post_on_exceptions_flag()             { return _should_post_on_exceptions_flag; }
  void set_should_post_on_exceptions_flag(int val)  { _should_post_on_exceptions_flag = val; }

 private:
  ThreadStatistics *_thread_stat;

 public:
  ThreadStatistics* get_thread_stat() const    { return _thread_stat; }

  // Return a blocker object for which this thread is blocked parking.
  oop current_park_blocker();

 private:
  static size_t _stack_size_at_create;

 public:
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

  // Machine dependent stuff
#include OS_CPU_HEADER(thread)

  // JSR166 per-thread parker
 private:
  Parker* _parker;
 public:
  Parker* parker() { return _parker; }

  // Biased locking support
 private:
  GrowableArray<MonitorInfo*>* _cached_monitor_info;
 public:
  GrowableArray<MonitorInfo*>* cached_monitor_info()              { return _cached_monitor_info; }
  void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }

  // clearing/querying jni attach status
  bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
  bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
  inline void set_done_attaching_via_jni();
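  // Attach sketch (illustrative; the AttachCurrentThread plumbing lives in
  // jni.cpp and is only summarized here as an assumption): a natively created
  // thread that attaches via JNI is constructed in the _attaching_via_jni
  // state and is flipped to _attached_via_jni once its java.lang.Thread
  // object exists.
  //
  //   if (jt->is_attaching_via_jni()) {
  //     // allocate and link the java.lang.Thread oop, then
  //     jt->set_done_attaching_via_jni();
  //   }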
  // Stack dump assistance:
  // Track the class we want to initialize but for which we have to wait
  // on its init_lock() because it is already being initialized.
  void set_class_to_be_initialized(InstanceKlass* k);
  InstanceKlass* class_to_be_initialized() const;

 private:
  InstanceKlass* _class_to_be_initialized;

};

// Inline implementation of JavaThread::current
inline JavaThread* JavaThread::current() {
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "just checking");
  return (JavaThread*)thread;
}

inline CompilerThread* JavaThread::as_CompilerThread() {
  assert(is_Compiler_thread(), "just checking");
  return (CompilerThread*)this;
}

// Dedicated thread to sweep the code cache
class CodeCacheSweeperThread : public JavaThread {
  CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper
 public:
  CodeCacheSweeperThread();
  // Track the nmethod currently being scanned by the sweeper
  void set_scanned_compiled_method(CompiledMethod* cm) {
    assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
    _scanned_compiled_method = cm;
  }

  // Hide sweeper thread from external view.
  bool is_hidden_from_external_view() const { return true; }

  bool is_Code_cache_sweeper_thread() const { return true; }

  // Prevent GC from unloading _scanned_compiled_method
  void oops_do(OopClosure* f, CodeBlobClosure* cf);
  void nmethods_do(CodeBlobClosure* cf);
};

#if defined(ASSERT) && COMPILER2_OR_JVMCI
// See Deoptimization::deoptimize_objects_alot_loop()
class DeoptimizeObjectsALotThread : public JavaThread {
 public:
  DeoptimizeObjectsALotThread();
};
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI

// A thread used for Compilation.
class CompilerThread : public JavaThread {
  friend class VMStructs;
 private:
  CompilerCounters* _counters;

  ciEnv*                _env;
  CompileLog*           _log;
  CompileTask* volatile _task;  // print_threads_compiling can read this concurrently.
  CompileQueue*         _queue;
  BufferBlob*           _buffer_blob;

  AbstractCompiler*     _compiler;
  TimeStamp             _idle_time;

 public:

  static CompilerThread* current();

  CompilerThread(CompileQueue* queue, CompilerCounters* counters);
  ~CompilerThread();

  bool is_Compiler_thread() const                { return true; }

  virtual bool can_call_java() const;

  // Hide native compiler threads from external view.
  bool is_hidden_from_external_view() const      { return !can_call_java(); }

  void set_compiler(AbstractCompiler* c)         { _compiler = c; }
  AbstractCompiler* compiler() const             { return _compiler; }

  CompileQueue* queue()        const             { return _queue; }
  CompilerCounters* counters() const             { return _counters; }

  // Get/set the thread's compilation environment.
  ciEnv*        env()                            { return _env; }
  void          set_env(ciEnv* env)              { _env = env; }

  BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
  void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; }

  // Get/set the thread's logging information
  CompileLog*   log()                            { return _log; }
  void          init_log(CompileLog* log) {
    // Set once, for good.
    assert(_log == NULL, "set only once");
    _log = log;
  }

  void start_idle_timer()                        { _idle_time.update(); }
  jlong idle_time_millis() {
    return TimeHelper::counter_to_millis(_idle_time.ticks_since_update());
  }

#ifndef PRODUCT
 private:
  IdealGraphPrinter *_ideal_graph_printer;
 public:
  IdealGraphPrinter *ideal_graph_printer()           { return _ideal_graph_printer; }
  void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
#endif

  // Get/set the thread's current task
  CompileTask* task()                      { return _task; }
  void         set_task(CompileTask* task) { _task = task; }
};

inline CompilerThread* CompilerThread::current() {
  return JavaThread::current()->as_CompilerThread();
}

// The active thread queue. It also keeps track of the currently used
// thread priorities.
class Threads: AllStatic {
  friend class VMStructs;
 private:
  static int         _number_of_threads;
  static int         _number_of_non_daemon_threads;
  static int         _return_code;
  static uintx       _thread_claim_token;
#ifdef ASSERT
  static bool        _vm_complete;
#endif

  static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
  static void initialize_jsr292_core_classes(TRAPS);

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p, bool is_daemon);
  static void non_java_threads_do(ThreadClosure* tc);
  static void java_threads_do(ThreadClosure* tc);
  static void java_threads_and_vm_thread_do(ThreadClosure* tc);
  static void threads_do(ThreadClosure* tc);
  static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc);

  // Initializes the vm and creates the vm thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // The "thread claim token" provides a way for threads to be claimed
  // by parallel worker tasks.
  //
  // Each thread contains a "token" field. A task will claim the
  // thread only if its token is different from the global token,
  // which is updated by calling change_thread_claim_token(). When
  // a thread is claimed, its token is set to the global token value
  // so other threads in the same iteration pass won't claim it.
  //
  // For this to work change_thread_claim_token() needs to be called
  // exactly once in sequential code before starting parallel tasks
  // that should claim threads.
  //
  // New threads get their token set to 0 and change_thread_claim_token()
  // never sets the global token to 0.
  static uintx thread_claim_token() { return _thread_claim_token; }
  static void change_thread_claim_token();
  static void assert_all_threads_claimed() NOT_DEBUG_RETURN;
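  // Usage sketch (illustrative; 'ScanRootsClosure' and the worker set-up are
  // assumptions, not code from this header): a parallel root-scanning phase
  // bumps the claim token once, then lets every worker iterate all threads;
  // the per-thread token ensures each thread is processed exactly once.
  //
  //   class ScanRootsClosure : public ThreadClosure {
  //     virtual void do_thread(Thread* t) { /* scan t's roots */ }
  //   };
  //
  //   Threads::change_thread_claim_token();   // sequential prologue
  //   // in each worker:
  //   ScanRootsClosure cl;
  //   Threads::possibly_parallel_threads_do(true /* is_par */, &cl);
  //   // sequential epilogue (debug builds only):
  //   Threads::assert_all_threads_claimed();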
  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f, CodeBlobClosure* cf);
  // This version may be called by sequential or parallel code.
  static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf);

  // Sweeper
  static void nmethods_do(CodeBlobClosure* cf);

  // RedefineClasses support
  static void metadata_do(MetadataClosure* f);
  static void metadata_handles_do(void f(Metadata*));

#ifdef ASSERT
  static bool is_vm_complete() { return _vm_complete; }
#endif // ASSERT

  // Verification
  static void verify();
  static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks, bool print_extended_info);
  static void print(bool print_stacks, bool internal_format) {
    // this function is only used by debug.cpp
    print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */, false /* simple format */);
  }
  static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);
  static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
                             int buflen, bool* found_current);
  static void print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form = false);

  // Get Java threads that are waiting to enter a monitor.
  static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list,
                                                         int count, address monitor);

  // Get owning Java thread from the monitor's owner field.
  static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list,
                                                      address owner);

  // Number of threads on the active threads list
  static int number_of_threads()                 { return _number_of_threads; }
  // Number of non-daemon threads on the active threads list
  static int number_of_non_daemon_threads()      { return _number_of_non_daemon_threads; }

  // Deoptimizes all frames tied to marked nmethods
  static void deoptimized_wrt_marked_nmethods();

  struct Test;                  // For private gtest access.
};


// Thread iterator
class ThreadClosure: public StackObj {
 public:
  virtual void do_thread(Thread* thread) = 0;
};

class SignalHandlerMark: public StackObj {
 private:
  Thread* _thread;
 public:
  SignalHandlerMark(Thread* t) {
    _thread = t;
    if (_thread) _thread->enter_signal_handler();
  }
  ~SignalHandlerMark() {
    if (_thread) _thread->leave_signal_handler();
    _thread = NULL;
  }
};


#endif // SHARE_RUNTIME_THREAD_HPP