/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "code/compiledMethod.hpp"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/frame.hpp"
#include "runtime/globals.hpp"
#include "runtime/handshake.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/park.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/align.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef ZERO
# include "stack_zero.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif


class SafeThreadsListPtr;
class ThreadSafepointState;
class ThreadsList;
class ThreadsSMRSupport;

class JvmtiRawMonitor;
class JvmtiThreadState;
class ThreadStatistics;
class ConcurrentLocksDump;
class ParkEvent;
class Parker;
class MonitorInfo;

class ciEnv;
class CompileThread;
class CompileLog;
class CompileTask;
class CompileQueue;
class CompilerCounters;

class vframeArray;
class vframe;
class javaVFrame;

class DeoptResourceMark;
class jvmtiDeferredLocalVariableSet;

class ThreadClosure;
class ICRefillVerifier;
class IdealGraphPrinter;

class JVMCIEnv;
class JVMCIPrimitiveArray;

class Metadata;
class ResourceArea;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses eg CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//         - GangWorker
//     - WatcherThread
//     - JfrThreadSampler
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//  - stack initialization
//  - other OS-level initialization (signal masks etc)
//  - handshake with creating thread (if not started suspended)
//  - this->call_run()  // common shared entry point
//    - shared common initialization
//    - this->pre_run()   // virtual per-thread-type initialization
//    - this->run()       // virtual per-thread-type "main" logic
//    - shared common tear-down
//    - this->post_run()  // virtual per-thread-type tear-down
//    - // 'this' no longer referenceable
//  - OS-level tear-down (minimal)
//  - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread
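
// Illustrative sketch of the sequence above (hypothetical class, not part of
// this header): a minimal NonJavaThread subclass only needs to supply run();
// NonJavaThread provides pre_run()/post_run(), and call_run() drives the
// whole sequence.
//
//   class HelloThread : public NonJavaThread {
//    public:
//     char* name() const { return (char*)"Hello Thread"; }
//    protected:
//     void run() { log_info(os, thread)("%s is running", name()); }
//   };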

class Thread: public ThreadShadow {
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL Thread* _thr_current;
#endif

  // Thread local data area available to the GC. The internal
  // structure and contents of this data area is GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop         _pending_exception;              // pending exception for current thread
  // const char* _exception_file;                // file information for exception (debugging only)
  // int         _exception_line;                // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // Support for forcing alignment of thread objects for biased locking
  void*       _real_malloc_address;

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr();
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  static bool is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList* tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList* untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
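
  // Round-trip illustration of the tagging scheme (the asserts are
  // hypothetical, not VM code): ThreadsList objects are word aligned, so the
  // low pointer bit is free to serve as the hazard-ptr tag:
  //   assert(untag_hazard_ptr(tag_hazard_ptr(list)) == list, "lossless round trip");
  //   assert(is_hazard_ptr_tagged(tag_hazard_ptr(list)), "tagging sets the low bit");
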
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false);
  }
  void  operator delete(void* p);

 protected:
  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 private:

  // ***************************************************************
  // Suspend and resume support
  // ***************************************************************
  //
  // VM suspend/resume no longer exists - it was once used for various
  // things including safepoints, but was deprecated and finally removed
  // in Java 7. Because VM suspension was considered "internal" and
  // Java-level suspension was considered "external", this legacy naming
  // scheme remains.
  //
  // External suspend/resume requests come from JVM_SuspendThread,
  // JVM_ResumeThread, JVMTI SuspendThread, and JVMTI ResumeThread.
  // External suspend requests cause _external_suspend to be set and
  // external resume requests cause _external_suspend to be cleared.
  // External suspend requests do not nest on top of other external
  // suspend requests. The higher level APIs reject suspend requests
  // for already suspended threads.
  //
  // The external_suspend flag is checked by
  // has_special_runtime_exit_condition() and the java thread will
  // self-suspend when handle_special_runtime_exit_condition() is
  // called. Most uses of the _thread_blocked state in JavaThreads are
  // considered the same as being externally suspended; if the blocking
  // condition lifts, the JavaThread will self-suspend. Other places
  // where the VM checks for external_suspend include:
  //   + mutex granting (do not enter monitors when thread is suspended)
  //   + state transitions from _thread_in_native
  //
  // In general, java_suspend() does not wait for an external suspend
  // request to complete. When it returns, the only guarantee is that
  // the _external_suspend field is true.
  //
  // wait_for_ext_suspend_completion() is used to wait for an external
  // suspend request to complete. External suspend requests are usually
  // followed by some other interface call that requires the thread to
  // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
  // the interface that requires quiescence, we give the JavaThread a
  // chance to self-suspend before we need it to be quiescent. This
  // improves overall suspend/query performance.
  //
  // _suspend_flags controls the behavior of java_ suspend/resume.
  // It must be set under the protection of SR_lock. Reading the flag
  // without SR_lock is OK as long as the value is only used as a hint
  // (e.g., check _external_suspend first without the lock, then recheck
  // inside SR_lock and finish the suspension).
  //
  // _suspend_flags is also overloaded for other "special conditions" so
  // that a single check indicates whether any special action is needed,
  // e.g. for async exceptions.
  // -------------------------------------------------------------------
  // Notes:
  // 1. The suspend/resume logic no longer uses ThreadState in OSThread,
  // but we still update its value to keep other parts of the system
  // (mainly JVMTI) happy. ThreadState is legacy code (see notes in
  // osThread.hpp).
  //
  // 2. It would be more natural if set_external_suspend() were private and
  // part of java_suspend(), but that would probably affect the suspend/query
  // performance. This needs more investigation.

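  // Illustrative double-check pattern implied above (a sketch; the real
  // logic lives in java_suspend_self() and its callers):
  //   if (thread->is_external_suspend()) {                   // racy read, hint only
  //     MutexLocker ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
  //     if (thread->is_external_suspend()) {                 // recheck under SR_lock
  //       // ... finish the suspension ...
  //     }
  //   }
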
  // suspend/resume lock: used for self-suspend
  Monitor* _SR_lock;

 protected:
  enum SuspendFlags {
    // NOTE: avoid using the sign-bit as cc generates different test code
    // when the sign-bit is used, and sometimes incorrectly - see CR 6398077

    _external_suspend       = 0x20000000U, // thread is asked to self suspend
    _ext_suspended          = 0x40000000U, // thread has self-suspended

    _has_async_exception    = 0x00000001U, // there is a pending async exception
    _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock

    _trace_flag             = 0x00000004U  // call tracing backend
  };

  // various suspension related flags - atomically updated
  // overloaded for async exception checking in check_special_condition_for_native_trans.
  volatile uint32_t _suspend_flags;

 private:
  int _num_nested_signal;

  DEBUG_ONLY(bool _suspendible_thread;)

 public:
  void enter_signal_handler() { _num_nested_signal++; }
  void leave_signal_handler() { _num_nested_signal--; }
  bool is_inside_signal_handler() const { return _num_nested_signal > 0; }

  // Determines if a heap allocation failure will be retried
  // (e.g., by deoptimizing and re-executing in the interpreter).
  // In this case, the failed allocation must raise
  // Universe::out_of_memory_error_retry() and omit side effects
  // such as JVMTI events and handling -XX:+HeapDumpOnOutOfMemoryError
  // and -XX:OnOutOfMemoryError.
  virtual bool in_retryable_allocation() const { return false; }

#ifdef ASSERT
  void set_suspendible_thread() {
    _suspendible_thread = true;
  }

  void clear_suspendible_thread() {
    _suspendible_thread = false;
  }

  bool is_suspendible_thread() { return _suspendible_thread; }
#endif

 private:
  // Active_handles points to a block of handles
  JNIHandleBlock* _active_handles;

  // One-element thread local free list
  JNIHandleBlock* _free_handle_block;

  // Points to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }

 public:
  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
 private:

#ifdef ASSERT
  ICRefillVerifier* _missed_ic_stub_refill_verifier;

 public:
  ICRefillVerifier* missed_ic_stub_refill_verifier() {
    return _missed_ic_stub_refill_verifier;
  }

  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
    _missed_ic_stub_refill_verifier = verifier;
  }
#endif // ASSERT

 private:

  // Debug support for checking if code allows safepoints or not.
  // Safepoints in the VM can happen because of allocation, invoking a VM
  // operation, blocking on a mutex, or blocking on an object synchronizer
  // (Java locking).
  // If _no_safepoint_count is non-zero, then an assertion failure will happen
  // in any of the above cases.
  //
  // The class NoSafepointVerifier is used to set this counter.
  //
  NOT_PRODUCT(int _no_safepoint_count;)         // If 0, the thread allows a safepoint to happen

 private:
  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class GCLocker;
  friend class NoSafepointVerifier;
  friend class PauseNoSafepointVerifier;

  volatile void* _polling_page;                 // Thread local polling page

  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For use when sampling the memory.

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  int _vm_operation_started_count;              // VM_Operation support
  int _vm_operation_completed_count;            // VM_Operation support

  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
                                                // is waiting to lock
  bool _current_pending_monitor_is_from_java;   // locking is from Java code
  JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
                                                 // is waiting to lock


  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Per-thread ObjectMonitor lists:
 public:
  ObjectMonitor* om_free_list;                  // SLL of free ObjectMonitors
  int om_free_count;                            // # on om_free_list
  int om_free_provision;                        // # to try to allocate next
  ObjectMonitor* om_in_use_list;                // SLL of in-use ObjectMonitors
  int om_in_use_count;                          // # on om_in_use_list

#ifdef ASSERT
 private:
  volatile uint64_t _visited_for_critical_count;

 public:
  void set_visited_for_critical_count(uint64_t safepoint_id) {
    assert(_visited_for_critical_count == 0, "Must be reset before set");
    assert((safepoint_id & 0x1) == 1, "Must be odd");
    _visited_for_critical_count = safepoint_id;
  }
  void reset_visited_for_critical_count(uint64_t safepoint_id) {
    assert(_visited_for_critical_count == safepoint_id, "Was not visited");
    _visited_for_critical_count = 0;
  }
  bool was_visited_for_critical_count(uint64_t safepoint_id) const {
    return _visited_for_critical_count == safepoint_id;
  }
#endif

 public:
  enum {
    is_definitely_current_thread = true
  };

  // Constructor
  Thread();
  virtual ~Thread() = 0;        // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current(); // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif


 public:
  // invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();
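
  // Illustrative caller (a sketch; the real per-OS native entry points live
  // in os_<platform>.cpp and differ in detail):
  //   // inside the native entry function, after OS-level initialization:
  //   thread->call_run();  // drives pre_run(), run(), post_run()
  //   // 'thread' must not be dereferenced once call_run() returns
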
  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_Code_cache_sweeper_thread() const  { return false; }
  virtual bool is_service_thread() const             { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  // True iff the thread can perform GC operations at a safepoint.
  // Generally will be true only of VM thread and parallel GC WorkGang
  // threads.
  virtual bool is_GC_task_thread() const             { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }

  // Can this thread make Java upcalls?
  virtual bool can_call_java() const                 { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const         { return false; }

  // Casts
  virtual WorkerThread* as_Worker_thread() const     { return NULL; }

  virtual char* name() const { return (char*)"Unknown thread"; }

  // Returns the current thread (ASSERTS if NULL)
  static inline Thread* current();
  // Returns the current thread, or NULL if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or NULL if not attached, and is
  // safe for use from signal-handlers
  static inline Thread* current_or_null_safe();

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static ThreadPriority get_priority(const Thread* const thread);
  static void start(Thread* thread);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  Monitor* SR_lock() const { return _SR_lock; }

  bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }

  inline void set_suspend_flag(SuspendFlags f);
  inline void clear_suspend_flag(SuspendFlags f);

  inline void set_has_async_exception();
  inline void clear_has_async_exception();

  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  inline void set_critical_native_unlock();
  inline void clear_critical_native_unlock();

  inline void set_trace_flag();
  inline void clear_trace_flag();

  // Support for Unhandled Oop detection
  // Add the field for both fastdebug and debug builds to keep
  // Thread's field layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif

  // Installs a pending exception to be inserted later
  static void send_async_exception(oop thread_oop, oop java_throwable);

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const         { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab() { return _tlab; }
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize();
    }
  }

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  bool is_trace_suspend()               { return (_suspend_flags & _trace_flag) != 0; }

  // VM operation support
  int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
  int vm_operation_completed_count()             { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  bool current_pending_monitor_is_from_java() {
    return _current_pending_monitor_is_from_java;
  }

  // For tracking the Jvmti raw monitor the thread is pending on.
  JvmtiRawMonitor* current_pending_raw_monitor() {
    return _current_pending_raw_monitor;
  }
  void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
    _current_pending_raw_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already. Returns "true" iff the
  // calling thread does the update; this indicates that the calling thread
  // has claimed the thread in the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }

  uintx threads_do_token() const { return _threads_do_token; }
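
  // Illustrative parallel claim (a sketch; process_thread() is hypothetical):
  // during a parallel iteration every worker attempts to claim every thread,
  // and exactly one worker wins each thread for a given claim token:
  //   if (t->claim_threads_do(true /* is_par */, claim_token)) {
  //     process_thread(t);  // only the claiming worker processes t
  //   }
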
  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

  // Used by fast lock support
  virtual bool is_lock_owned(address adr) const;

  // Check if address is in the live stack of this thread (not just for locks).
  // Warning: can only be called by the current thread on itself.
  bool is_in_stack(address adr) const;

  // Check if address is in the stack mapped to this thread. Used mainly in
  // error reporting (so has to include guard zone) and frame printing.
  bool on_local_stack(address adr) const {
    return (_stack_base > adr && adr >= stack_end());
  }

  // Sets this thread as the starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address _stack_base;
  size_t  _stack_size;
  int     _lgrp_id;

  volatile void** polling_page_addr() { return &_polling_page; }

 public:
  // Stack overflow support
  address stack_base() const           { assert(_stack_base != NULL, "Sanity check"); return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end() const            { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT() NOT_NMT_RETURN;

  int  lgrp_id() const        { return _lgrp_id; }
  void set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by this thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const  { print_owned_locks_on(tty);    }
  Mutex* owned_locks() const      { return _owned_locks;          }
  bool owns_locks() const         { return owned_locks() != NULL; }

  // Deadlock detection
  ResourceMark* current_resource_mark()            { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

  // These functions check conditions on a JavaThread before possibly going to a safepoint,
  // including NoSafepointVerifier.
  void check_for_valid_safepoint_state() NOT_DEBUG_RETURN;
  void check_possible_safepoint() NOT_DEBUG_RETURN;

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()  { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()  { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()  { return byte_offset_of(Thread, _exception_line); }
  static ByteSize active_handles_offset()  { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset()      { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()      { return byte_offset_of(Thread, _stack_size); }

  static ByteSize polling_page_offset()    { return byte_offset_of(Thread, _polling_page); }

  static ByteSize tlab_start_offset()      { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()        { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()        { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()     { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent * _ParkEvent;       // for Object monitors and JVMTI raw monitors
  ParkEvent * _MuxEvent;        // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion;      // diagnostic

  volatile int _OnTrap;         // Resume-at IP delta
  jint _hashStateW;             // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;             // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;

  // Low-level leaf-lock primitives used to implement synchronization
  // and native monitor-mutex infrastructure.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);
  static void muxAcquire(volatile intptr_t * Lock, const char * Name);
  static void muxRelease(volatile intptr_t * Lock);
};

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != NULL, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
}
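
// Illustrative signal-handler use (a sketch): such code must use the *_safe
// variant and tolerate a detached thread:
//   Thread* t = Thread::current_or_null_safe();
//   if (t != NULL && t->is_Java_thread()) {
//     // ... inspect the JavaThread, making no liveness assumptions ...
//   }
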
class NonJavaThread: public Thread {
  friend class VMStructs;

  NonJavaThread* volatile _next;

  class List;
  static List _the_list;

  void add_to_the_list();
  void remove_from_the_list();

 protected:
  virtual void pre_run();
  virtual void post_run();

 public:
  NonJavaThread();
  ~NonJavaThread();

  class Iterator;
};

// Provides iteration over the list of NonJavaThreads.
// List addition occurs in pre_run(), and removal occurs in post_run(),
// so that only live fully-initialized threads can be found in the list.
// Threads created after an iterator is constructed will not be visited
// by the iterator. The scope of an iterator is a critical section; there
// must be no safepoint checks in that scope.
class NonJavaThread::Iterator : public StackObj {
  uint _protect_enter;
  NonJavaThread* _current;

  NONCOPYABLE(Iterator);

 public:
  Iterator();
  ~Iterator();

  bool end() const { return _current == NULL; }
  NonJavaThread* current() const { return _current; }
  void step();
};
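
// Illustrative iteration (a sketch; visit() is hypothetical). The loop body
// forms a critical section, so it must not contain safepoint checks:
//   for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
//     visit(njti.current());
//   }
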
// Name support for threads. non-JavaThread subclasses with multiple
// uniquely named instances should derive from this.
class NamedThread: public NonJavaThread {
  friend class VMStructs;
  enum {
    max_name_len = 64
  };
 private:
  char* _name;
  // log JavaThread being processed by oops_do
  JavaThread* _processed_thread;
  uint _gc_id; // The current GC id when a thread takes part in GC

 public:
  NamedThread();
  ~NamedThread();
  // May only be called once per thread.
  void set_name(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3);
  virtual bool is_Named_thread() const { return true; }
  virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
  JavaThread *processed_thread() { return _processed_thread; }
  void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
  virtual void print_on(outputStream* st) const;

  void set_gc_id(uint gc_id) { _gc_id = gc_id; }
  uint gc_id() { return _gc_id; }
};

// Worker threads are named and carry the id of their assigned work.
class WorkerThread: public NamedThread {
 private:
  uint _id;
 public:
  WorkerThread() : _id(0) { }
  virtual bool is_Worker_thread() const { return true; }

  virtual WorkerThread* as_Worker_thread() const {
    assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
    return (WorkerThread*) this;
  }

  void set_id(uint work_id) { _id = work_id; }
  uint id() const           { return _id; }
};

// A single WatcherThread is used for simulating timer interrupts.
class WatcherThread: public NonJavaThread {
  friend class VMStructs;
 protected:
  virtual void run();

 private:
  static WatcherThread* _watcher_thread;

  static bool _startable;
  // volatile due to at least one lock-free read
  volatile static bool _should_terminate;
 public:
  enum SomeConstants {
    delay_interval = 10                          // interrupt delay in milliseconds
  };

  // Constructor
  WatcherThread();

  // No destruction allowed
  ~WatcherThread() {
    guarantee(false, "WatcherThread deletion must fix the race with VM termination");
  }

  // Tester
  bool is_Watcher_thread() const { return true; }

  // Printing
  char* name() const { return (char*)"VM Periodic Task Thread"; }
  void print_on(outputStream* st) const;
  void unpark();

  // Returns the single instance of WatcherThread
  static WatcherThread* watcher_thread() { return _watcher_thread; }

  // Create and start the single instance of WatcherThread, or stop it on shutdown
  static void start();
  static void stop();
  // Only allow start once the VM is sufficiently initialized
  // Otherwise the first task to enroll will trigger the start
  static void make_startable();
 private:
  int sleep() const;
};
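
// Illustrative startup ordering (a sketch of how the VM drives the API
// above): start() is gated on make_startable(), so periodic tasks enrolled
// early in VM initialization do not spin up the watcher thread prematurely:
//   WatcherThread::make_startable();  // once the VM is sufficiently initialized
//   WatcherThread::start();           // or triggered by the next task enroll
//   ...
//   WatcherThread::stop();            // during VM shutdown
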
class CompilerThread;

typedef void (*ThreadFunction)(JavaThread*, TRAPS);

class JavaThread: public Thread {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class WhiteBox;
 private:
  bool _on_thread_list;                          // Is set when this JavaThread is added to the Threads list
  oop  _threadObj;                               // The Java level thread object

#ifdef ASSERT
 private:
  int _java_call_counter;

 public:
  int  java_call_counter()     { return _java_call_counter; }
  void inc_java_call_counter() { _java_call_counter++; }
  void dec_java_call_counter() {
    assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
    _java_call_counter--;
  }
 private:  // restore original namespace restriction
#endif  // ifdef ASSERT

#ifndef PRODUCT
 public:
  enum {
    jump_ring_buffer_size = 16
  };
 private:  // restore original namespace restriction
#endif

  JavaFrameAnchor _anchor;                       // Encapsulation of current java frame and its state

  ThreadFunction _entry_point;

  JNIEnv _jni_environment;

  // Deopt support
  DeoptResourceMark* _deopt_mark;                // Holds special ResourceMark for deoptimization

  CompiledMethod* _deopt_nmethod;                // CompiledMethod that is currently being deoptimized
  vframeArray* _vframe_array_head;               // Holds the heap of the active vframeArrays
  vframeArray* _vframe_array_last;               // Holds last vFrameArray we popped
  // Because deoptimization is lazy we must save jvmti requests to set locals
  // in compiled frames until we deoptimize and we have an interpreter frame.
  // This holds the pointer to an array (yeah like there might be more than one) of
  // descriptions of compiled vframes that have locals that need to be updated.
  GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;

  // Handshake value for fixing 6243940. We need a place for the i2c
  // adapter to store the callee Method*. This value is NEVER live
  // across a gc point so it does NOT have to be gc'd.
  // The handshake is open ended since we can't be certain that it will
  // be NULLed. This is because we rarely ever see the race and end up
  // in handle_wrong_method which is the backend of the handshake. See
  // code in i2c adapters and handle_wrong_method.

  Method* _callee_target;

  // Used to pass back results to the interpreter or generated code running Java code.
  oop       _vm_result;    // oop result is GC-preserved
  Metadata* _vm_result_2;  // non-oop result

  // See ReduceInitialCardMarks: this holds the precise space interval of
  // the most recent slow path allocation for which compiled code has
  // elided card-marks for performance along the fast-path.
  MemRegion _deferred_card_mark;

  MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors
                                                 // allocated during deoptimization
                                                 // and by JNI_MonitorEnter/Exit

  // Async. requests support
  enum AsyncRequests {
    _no_async_condition = 0,
    _async_exception,
    _async_unsafe_access_error
  };
  AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
  oop           _pending_async_exception;

  // Safepoint support
 public:                                         // Expose _thread_state for SafeFetchInt()
  volatile JavaThreadState _thread_state;
 private:
  ThreadSafepointState* _safepoint_state;        // Holds information about a thread during a safepoint
  address               _saved_exception_pc;     // Saved pc of instruction where last implicit exception happened

  // JavaThread termination support
  enum TerminatedTypes {
    _not_terminated = 0xDEAD - 2,
    _thread_exiting,                             // JavaThread::exit() has been called for this thread
    _thread_terminated,                          // JavaThread is removed from thread list
    _vm_exited                                   // JavaThread is still executing native code, but VM is terminated
                                                 // only VM_Exit can set _vm_exited
  };

  // In general a JavaThread's _terminated field transitions as follows:
  //
  //   _not_terminated => _thread_exiting => _thread_terminated
  //
  // _vm_exited is a special value to cover the case of a JavaThread
  // executing native code after the VM itself is terminated.
  volatile TerminatedTypes _terminated;
  // suspend/resume support
  volatile bool _suspend_equivalent;             // Suspend equivalent condition
  jint          _in_deopt_handler;               // count of deoptimization
                                                 // handlers thread is in
  volatile bool _doing_unsafe_access;            // Thread may fault due to unsafe access
  bool _do_not_unlock_if_synchronized;           // Do not unlock the receiver of a synchronized method (since it was
                                                 // never locked) when throwing an exception. Used by interpreter only.

  // JNI attach states:
  enum JNIAttachStates {
    _not_attaching_via_jni = 1,  // thread is not attaching via JNI
    _attaching_via_jni,          // thread is attaching via JNI
    _attached_via_jni            // thread has attached via JNI
  };

  // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
  // A native thread that is attaching via JNI starts with a value
  // of _attaching_via_jni and transitions to _attached_via_jni.
  volatile JNIAttachStates _jni_attach_state;

 public:
  // State of the stack guard pages for this thread.
  enum StackGuardState {
    stack_guard_unused,                  // not needed
    stack_guard_reserved_disabled,
    stack_guard_yellow_reserved_disabled,// disabled (temporarily) after stack overflow
    stack_guard_enabled                  // enabled
  };

 private:

#if INCLUDE_JVMCI
  // The _pending_* fields below are used to communicate extra information
  // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.

  // Communicates the DeoptReason and DeoptAction of the uncommon trap
  int _pending_deoptimization;

  // Specifies whether the uncommon trap is to bci 0 of a synchronized method
  // before the monitor has been acquired.
  bool _pending_monitorenter;

  // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
  bool _pending_transfer_to_interpreter;

  // True if in a runtime call from compiled code that will deoptimize
  // and re-execute a failed heap allocation in the interpreter.
  bool _in_retryable_allocation;

  // An id of a speculation that JVMCI compiled code can use to further describe and
  // uniquely identify the speculative optimization guarded by the uncommon trap
  jlong _pending_failed_speculation;

  // These fields are mutually exclusive in terms of live ranges.
  union {
    // Communicates the pc at which the most recent implicit exception occurred
    // from the signal handler to a deoptimization stub.
    address _implicit_exception_pc;

    // Communicates an alternative call target to an i2c stub from a JavaCall.
    address _alternate_call_target;
  } _jvmci;

  // Support for high precision, thread sensitive counters in JVMCI compiled code.
  jlong* _jvmci_counters;

 public:
  static jlong* _jvmci_old_thread_counters;
  static void collect_counters(jlong* array, int length);
  void resize_counters(int current_size, int new_size);
  static void resize_all_jvmci_counters(int new_size);

 private:
#endif // INCLUDE_JVMCI

  StackGuardState _stack_guard_state;

  // Precompute the limit of the stack as used in stack overflow checks.
  // We load it from here to simplify the stack overflow check in assembly.
  address _stack_overflow_limit;
  address _reserved_stack_activation;

  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
  // code)
  volatile oop     _exception_oop;               // Exception thrown in compiled code
  volatile address _exception_pc;                // PC where exception happened
  volatile address _exception_handler_pc;        // PC for handler of exception
  volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.

 private:
  // support for JNI critical regions
  jint _jni_active_critical;                     // count of entries into JNI critical region

  // Checked JNI: function name requires exception check
  char* _pending_jni_exception_check_fn;

  // For deadlock detection.
  int _depth_first_number;

  // JVMTI PopFrame support
  // This is set to popframe_pending to signal that top Java frame should be popped immediately
  int _popframe_condition;

  // If reallocation of scalar replaced objects fails, we throw OOM
  // and during exception propagation, pop the top
  // _frames_to_pop_failed_realloc frames, the ones that reference
  // failed reallocations.
  int _frames_to_pop_failed_realloc;

  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize();                             // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif // ASSERT

  // JNI function table getter/setter for JVMTI jni function table interception API.
  void set_jni_functions(struct JNINativeInterface_* functionTable) {
    _jni_environment.functions = functionTable;
  }
  struct JNINativeInterface_* get_jni_functions() {
    return (struct JNINativeInterface_ *)_jni_environment.functions;
  }

  // This function is called at thread creation to allow
  // platform specific thread variables to be initialized.
  void cache_global_variables();

  // Executes Shutdown.shutdown()
  void invoke_shutdown_hooks();

  // Cleanup on thread exit
  enum ExitType {
    normal_exit,
    jni_detach
  };
  void exit(bool destroy_vm, ExitType exit_type = normal_exit);

  void cleanup_failed_attach_current_thread(bool is_daemon);

  // Testers
  virtual bool is_Java_thread() const { return true;  }
  virtual bool can_call_java() const  { return true; }

  virtual bool is_active_Java_thread() const {
    return on_thread_list() && !is_terminated();
  }

  // Thread oop. threadObj() can be NULL for initial JavaThread
  // (or for threads attached via JNI)
  oop threadObj() const     { return _threadObj; }
  void set_threadObj(oop p) { _threadObj = p; }

  // Prepare thread and add to priority queue. If a priority is
  // not specified, use the priority of the thread object. Threads_lock
  // must be held while this function is called.
  void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);

  void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
  address saved_exception_pc()            { return _saved_exception_pc; }


  ThreadFunction entry_point() const { return _entry_point; }
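
  // Illustrative creation sequence (a sketch mirroring JVM_StartThread;
  // thread_entry and the size/handle names are hypothetical):
  //   JavaThread* jt = new JavaThread(&thread_entry, stack_size_in_bytes);
  //   jt->prepare(jni_thread_handle);  // with Threads_lock held
  //   Thread::start(jt);
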
  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
  void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS);

  // Last frame anchor routines

  JavaFrameAnchor* frame_anchor(void) { return &_anchor; }

  // last_Java_sp
  bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); }
  intptr_t* last_Java_sp() const   { return _anchor.last_Java_sp(); }

  // last_Java_pc

  address last_Java_pc(void) { return _anchor.last_Java_pc(); }

  // Safepoint support
  inline JavaThreadState thread_state() const;
  inline void set_thread_state(JavaThreadState s);
  inline void set_thread_state_fence(JavaThreadState s);  // fence after setting thread state
  inline ThreadSafepointState* safepoint_state() const;
  inline void set_safepoint_state(ThreadSafepointState* state);
  inline bool is_at_poll_safepoint();

  // JavaThread termination and lifecycle support:
  void smr_delete();
  bool on_thread_list() const { return _on_thread_list; }
  void set_on_thread_list()   { _on_thread_list = true; }

  // thread has called JavaThread::exit() or is terminated
  bool is_exiting() const;
  // thread is terminated (no longer on the threads list); we compare
  // against the two non-terminated values so that a freed JavaThread
  // will also be considered terminated.
  bool check_is_terminated(TerminatedTypes l_terminated) const {
    return l_terminated != _not_terminated && l_terminated != _thread_exiting;
  }
  bool is_terminated() const;
  void set_terminated(TerminatedTypes t);
  // special for Threads::remove() which is static:
  void set_terminated_value();
  void block_if_vm_exited();

  bool doing_unsafe_access()             { return _doing_unsafe_access; }
  void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; }

  bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
  void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }

  inline void set_polling_page_release(void* poll_value);
  inline void set_polling_page(void* poll_value);
  inline volatile void* get_polling_page();

 private:
  // Support for thread handshake operations
  HandshakeState _handshake;
 public:
  void set_handshake_operation(HandshakeOperation* op) {
    _handshake.set_operation(this, op);
  }

  bool has_handshake() const {
    return _handshake.has_operation();
  }

  void handshake_process_by_self() {
    _handshake.process_by_self(this);
  }

  bool handshake_try_process_by_vmThread() {
    return _handshake.try_process_by_vmThread(this);
  }

#ifdef ASSERT
  bool is_vmthread_processing_handshake() const {
    return _handshake.is_vmthread_processing_handshake();
  }
#endif
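
  // Illustrative polling sketch: a JavaThread executes its own pending
  // handshake operation when it polls, while the VM thread may instead
  // process the operation on the target's behalf:
  //   if (jt->has_handshake()) {
  //     jt->handshake_process_by_self();
  //   }
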
  // Suspend/resume support for JavaThread
 private:
  inline void set_ext_suspended();
  inline void clear_ext_suspended();

 public:
  void java_suspend();      // higher-level suspension logic called by the public APIs
  void java_resume();       // higher-level resume logic called by the public APIs
  int  java_suspend_self(); // low-level self-suspension mechanics

 private:
  // mid-level wrapper around java_suspend_self to set up correct state and
  // check for a pending safepoint at the end
  void java_suspend_self_with_safepoint_check();

 public:
  void check_and_wait_while_suspended() {
    assert(JavaThread::current() == this, "sanity check");

    bool do_self_suspend;
    do {
      // were we externally suspended while we were waiting?
      do_self_suspend = handle_special_suspend_equivalent_condition();
      if (do_self_suspend) {
        // don't surprise the thread that suspended us by returning
        java_suspend_self();
        set_suspend_equivalent();
      }
    } while (do_self_suspend);
  }
  static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
  // Check for async exception in addition to safepoint and suspend request.
  static void check_special_condition_for_native_trans(JavaThread *thread);

  // Same as check_special_condition_for_native_trans but finishes the
  // transition into thread_in_Java mode so that it can potentially
  // block.
  static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);

  bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
  bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    // Warning: is_ext_suspend_completed() may temporarily drop the
    // SR_lock to allow the thread to reach a stable thread state if
    // it is currently in a transient thread state.
    return is_ext_suspend_completed(false /* !called_by_wait */,
                                    SuspendRetryDelay, bits);
  }

  // We cannot allow wait_for_ext_suspend_completion() to run forever or
  // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
  // passed as the count and delay parameters. Experiments with specific
  // calls to wait_for_ext_suspend_completion() can be done by passing
  // other values in the code. Experiments with all calls can be done
  // via the appropriate -XX options.
  bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);

  // test for suspend - most (all?) of these should go away
  bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);

  inline void set_external_suspend();
  inline void clear_external_suspend();

  bool is_external_suspend() const {
    return (_suspend_flags & _external_suspend) != 0;
  }
  // Whenever a thread transitions from native to vm/java it must suspend
  // if external|deopt suspend is present.
  bool is_suspend_after_native() const {
    return (_suspend_flags & (_external_suspend JFR_ONLY(| _trace_flag))) != 0;
  }

  // external suspend request is completed
  bool is_ext_suspended() const {
    return (_suspend_flags & _ext_suspended) != 0;
  }

  bool is_external_suspend_with_lock() const {
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_external_suspend();
  }
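
  // Illustrative suspend-equivalent protocol (a sketch of the pattern used
  // by blocking code such as Object.wait()): a blocked thread counts as
  // suspended, and on wakeup it self-suspends if an external suspend request
  // arrived in the meantime:
  //   set_suspend_equivalent();
  //   // ... block on the condition ...
  //   if (handle_special_suspend_equivalent_condition()) {
  //     java_suspend_self();
  //   }
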
  // Special method to handle a pending external suspend request
  // when a suspend equivalent condition lifts.
  bool handle_special_suspend_equivalent_condition() {
    assert(is_suspend_equivalent(),
           "should only be called in a suspend equivalence condition");
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    bool ret = is_external_suspend();
    if (!ret) {
      // not about to self-suspend so clear suspend equivalence
      clear_suspend_equivalent();
    }
    // implied else:
    // We have a pending external suspend request so we leave the
    // suspend_equivalent flag set until java_suspend_self() sets
    // the ext_suspended flag and clears the suspend_equivalent
    // flag. This ensures that wait_for_ext_suspend_completion()
    // will return consistent values.
    return ret;
  }

  // utility methods to see if we are doing some kind of suspension
  bool is_being_ext_suspended() const {
    MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_ext_suspended() || is_external_suspend();
  }

  bool is_suspend_equivalent() const { return _suspend_equivalent; }

  void set_suspend_equivalent()   { _suspend_equivalent = true; }
  void clear_suspend_equivalent() { _suspend_equivalent = false; }

  // Thread.stop support
  void send_thread_stop(oop throwable);
  AsyncRequests clear_special_runtime_exit_condition() {
    AsyncRequests x = _special_runtime_exit_condition;
    _special_runtime_exit_condition = _no_async_condition;
    return x;
  }

  // Are any async conditions present?
  bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }

  void check_and_handle_async_exceptions(bool check_unsafe_error = true);

  // these next two are also used for self-suspension and async exception support
  void handle_special_runtime_exit_condition(bool check_asyncs = true);

  // Return true if JavaThread has an asynchronous condition or
  // if external suspension is requested.
  bool has_special_runtime_exit_condition() {
    // Because we don't use is_external_suspend_with_lock
    // it is possible that we won't see an asynchronous external suspend
    // request that has just gotten started, i.e., SR_lock grabbed but
    // _external_suspend field change either not made yet or not visible
    // yet. However, this is okay because the request is asynchronous and
    // we will see the new flag value the next time through. It's also
    // possible that the external suspend request is dropped after
    // we have checked is_external_suspend(); we will recheck its value
    // under SR_lock in java_suspend_self().
    return (_special_runtime_exit_condition != _no_async_condition) ||
            is_external_suspend() || is_trace_suspend();
  }

  void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; }

  inline void set_pending_async_exception(oop e);

  // Fast-locking support
  bool is_lock_owned(address adr) const;

  // Accessors for vframe array top
  // The linked list of vframe arrays is sorted on sp, so when we unpack,
  // the head must contain the vframe array to unpack.
  // Accessors for vframe array top
  // The linked list of vframe arrays is sorted on sp. This means that
  // when we unpack, the head must contain the vframe array to unpack.
  void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
  vframeArray* vframe_array_head() const         { return _vframe_array_head;  }

  // Side structure for deferring update of java frame locals until deopt occurs
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
  void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }

  // These only really exist to make debugging deopt problems simpler

  void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
  vframeArray* vframe_array_last() const         { return _vframe_array_last;  }

  // The special resourceMark used during deoptimization

  void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
  DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }

  void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; }
  CompiledMethod* deopt_compiled_method()        { return _deopt_nmethod; }

  Method* callee_target() const                  { return _callee_target; }
  void set_callee_target(Method* x)              { _callee_target = x; }

  // Oop results of vm runtime calls
  oop vm_result() const                          { return _vm_result; }
  void set_vm_result(oop x)                      { _vm_result = x; }

  Metadata* vm_result_2() const                  { return _vm_result_2; }
  void set_vm_result_2(Metadata* x)              { _vm_result_2 = x; }

  MemRegion deferred_card_mark() const           { return _deferred_card_mark; }
  void set_deferred_card_mark(MemRegion mr)      { _deferred_card_mark = mr; }

#if INCLUDE_JVMCI
  int pending_deoptimization() const             { return _pending_deoptimization; }
  jlong pending_failed_speculation() const       { return _pending_failed_speculation; }
  bool has_pending_monitorenter() const          { return _pending_monitorenter; }
  void set_pending_monitorenter(bool b)          { _pending_monitorenter = b; }
  void set_pending_deoptimization(int reason)    { _pending_deoptimization = reason; }
  void set_pending_failed_speculation(jlong failed_speculation) { _pending_failed_speculation = failed_speculation; }
  void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; }
  void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; }
  void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; }

  virtual bool in_retryable_allocation() const   { return _in_retryable_allocation; }
  void set_in_retryable_allocation(bool b)       { _in_retryable_allocation = b; }
#endif // INCLUDE_JVMCI

  // Exception handling for compiled methods
  oop exception_oop() const                      { return _exception_oop; }
  address exception_pc() const                   { return _exception_pc; }
  address exception_handler_pc() const           { return _exception_handler_pc; }
  bool is_method_handle_return() const           { return _is_method_handle_return == 1; }

  void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
  void set_exception_pc(address a)               { _exception_pc = a; }
  void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
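
  // Illustrative sketch (not VM code): runtime stubs hand an in-flight
  // exception to compiled code through the fields above. `jt` and `ex`
  // are hypothetical locals on the current thread's slow path.
  //
  //   jt->set_exception_oop(ex);          // exception being unwound
  //   jt->set_exception_pc(return_pc);    // where it was raised
  //   ...locate the handler and dispatch to it...
  //   jt->clear_exception_oop_and_pc();   // declared just below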

  void clear_exception_oop_and_pc() {
    set_exception_oop(NULL);
    set_exception_pc(NULL);
  }

  // Stack overflow support
  //
  //  (small addresses)
  //
  //  --  <--  stack_end()                   ---
  //  |                                       |
  //  |  red pages                            |
  //  |                                       |
  //  --  <--  stack_red_zone_base()          |
  //  |                                       |
  //  |                                     guard
  //  |  yellow pages                       zone
  //  |                                       |
  //  |                                       |
  //  --  <--  stack_yellow_zone_base()       |
  //  |                                       |
  //  |                                       |
  //  |  reserved pages                       |
  //  |                                       |
  //  --  <--  stack_reserved_zone_base()    ---      ---
  //                                                 /|\  shadow  <--  stack_overflow_limit() (somewhere in here)
  //                                                  |   zone
  //                                                 \|/  size
  //  some untouched memory                          ---
  //
  //
  //  --
  //  |
  //  |  shadow zone
  //  |
  //  --
  //  x    frame n
  //  --
  //  x    frame n-1
  //  x
  //  --
  //  ...
  //
  //  --
  //  x    frame 0
  //  --  <--  stack_base()
  //
  //  (large addresses)
  //

 private:
  // These values are derived from flags StackRedPages, StackYellowPages,
  // StackReservedPages and StackShadowPages. The zone size is determined
  // ergonomically if page_size > 4K.
  static size_t _stack_red_zone_size;
  static size_t _stack_yellow_zone_size;
  static size_t _stack_reserved_zone_size;
  static size_t _stack_shadow_zone_size;
 public:
  inline size_t stack_available(address cur_sp);

  static size_t stack_red_zone_size() {
    assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_red_zone_size;
  }
  static void set_stack_red_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We cannot protect if the red zone size is not page aligned.");
    assert(_stack_red_zone_size == 0, "This should be called only once.");
    _stack_red_zone_size = s;
  }
  address stack_red_zone_base() {
    return (address)(stack_end() + stack_red_zone_size());
  }
  bool in_stack_red_zone(address a) {
    return a <= stack_red_zone_base() && a >= stack_end();
  }

  static size_t stack_yellow_zone_size() {
    assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_yellow_zone_size;
  }
  static void set_stack_yellow_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We cannot protect if the yellow zone size is not page aligned.");
    assert(_stack_yellow_zone_size == 0, "This should be called only once.");
    _stack_yellow_zone_size = s;
  }
  static size_t stack_reserved_zone_size() {
    // _stack_reserved_zone_size may be 0. This indicates the feature is off.
    return _stack_reserved_zone_size;
  }
  static void set_stack_reserved_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We cannot protect if the reserved zone size is not page aligned.");
    assert(_stack_reserved_zone_size == 0, "This should be called only once.");
    _stack_reserved_zone_size = s;
  }
  address stack_reserved_zone_base() {
    return (address)(stack_end() +
                     (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
  }
  bool in_stack_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) &&
           (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
  }

  static size_t stack_yellow_reserved_zone_size() {
    return _stack_yellow_zone_size + _stack_reserved_zone_size;
  }
  bool in_stack_yellow_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
  }

  // Size of red + yellow + reserved zones.
  static size_t stack_guard_zone_size() {
    return stack_red_zone_size() + stack_yellow_reserved_zone_size();
  }

  static size_t stack_shadow_zone_size() {
    assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_shadow_zone_size;
  }
  static void set_stack_shadow_zone_size(size_t s) {
    // The shadow area is not allocated or protected, so
    // it need not be page aligned.
    // But the stack bang currently assumes that it is a
    // multiple of page size. This guarantees that the bang
    // loop touches all pages in the shadow zone.
    // This can be guaranteed differently, as well. E.g., if
    // the page size is a multiple of 4K, banging in 4K steps
    // suffices to touch all pages. (Some pages are banged
    // several times, though.)
    assert(is_aligned(s, os::vm_page_size()),
           "Stack bang assumes multiple of page size.");
    assert(_stack_shadow_zone_size == 0, "This should be called only once.");
    _stack_shadow_zone_size = s;
  }

  void create_stack_guard_pages();
  void remove_stack_guard_pages();

  void enable_stack_reserved_zone();
  void disable_stack_reserved_zone();
  void enable_stack_yellow_reserved_zone();
  void disable_stack_yellow_reserved_zone();
  void enable_stack_red_zone();
  void disable_stack_red_zone();

  inline bool stack_guard_zone_unused();
  inline bool stack_yellow_reserved_zone_disabled();
  inline bool stack_reserved_zone_disabled();
  inline bool stack_guards_enabled();
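
  // Illustrative sketch (not VM code): a fault handler classifying a
  // faulting address against the zones above. `jt` and `fault_addr` are
  // hypothetical.
  //
  //   if (jt->in_stack_red_zone(fault_addr)) {
  //     // unrecoverable overflow: red zone pages were touched
  //   } else if (jt->in_stack_yellow_reserved_zone(fault_addr)) {
  //     // recoverable: unguard and arrange for a StackOverflowError
  //   }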
  address reserved_stack_activation() const { return _reserved_stack_activation; }
  void set_reserved_stack_activation(address addr) {
    assert(_reserved_stack_activation == stack_base()
            || _reserved_stack_activation == NULL
            || addr == stack_base(), "Must not be set twice");
    _reserved_stack_activation = addr;
  }

  // Attempt to reguard the stack after a stack overflow may have occurred.
  // Returns true if (a) guard pages are not needed on this thread, (b) the
  // pages are already guarded, or (c) the pages were successfully reguarded.
  // Returns false if there is not enough stack space to reguard the pages, in
  // which case the caller should unwind a frame and try again. The argument
  // should be the caller's (approximate) sp.
  bool reguard_stack(address cur_sp);
  // Similar to the above, but checks whether the current stack pointer is out
  // of the guard area and reguards if possible.
  bool reguard_stack(void);

  address stack_overflow_limit() { return _stack_overflow_limit; }
  void set_stack_overflow_limit() {
    _stack_overflow_limit =
      stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
  }

  // Check if address is in the usable part of the stack (excludes protected
  // guard pages). Can be applied to any thread and is an approximation for
  // using is_in_stack when the query has to happen from another thread.
  bool is_in_usable_stack(address adr) const;

  // Misc. accessors/mutators
  void set_do_not_unlock(void)  { _do_not_unlock_if_synchronized = true; }
  void clr_do_not_unlock(void)  { _do_not_unlock_if_synchronized = false; }
  bool do_not_unlock(void)      { return _do_not_unlock_if_synchronized; }
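
  // Illustrative sketch (not VM code): method prologues compare sp against
  // the precomputed limit; stacks grow toward low addresses, so dropping
  // below the limit means the bang would enter the guard/shadow area.
  // `jt` and `cur_sp` are hypothetical.
  //
  //   if (cur_sp < jt->stack_overflow_limit()) {
  //     // slow path: take the stack-overflow branch
  //   }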
  // For assembly stub generation
  static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
  static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
  static ByteSize pending_jni_exception_check_fn_offset() {
    return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
  }
  static ByteSize last_Java_sp_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  }
  static ByteSize last_Java_pc_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
  }
  static ByteSize frame_anchor_offset() {
    return byte_offset_of(JavaThread, _anchor);
  }
  static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target); }
  static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result); }
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
#if INCLUDE_JVMCI
  static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
  static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
  static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
  static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
  static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
  static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
#endif // INCLUDE_JVMCI
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }
  static ByteSize doing_unsafe_access_offset()   { return byte_offset_of(JavaThread, _doing_unsafe_access); }

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }

  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
      thread_from_jni_env->block_if_vm_exited();
      return NULL;
    } else {
      return thread_from_jni_env;
    }
  }

  // JNI critical regions. These can nest.
  bool in_critical()      { return _jni_active_critical > 0; }
  bool in_last_critical() { return _jni_active_critical == 1; }
  inline void enter_critical();
  void exit_critical() {
    assert(Thread::current() == this, "this must be current thread");
    _jni_active_critical--;
    assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
  }
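
  // Illustrative sketch (not VM code): GetPrimitiveArrayCritical-style
  // entry points bracket direct array access with the nesting counter
  // above. `jt` is a hypothetical current JavaThread.
  //
  //   jt->enter_critical();   // GC must not move the array body now
  //   ...raw access to the array body...
  //   jt->exit_critical();    // leaving the last region re-enables GC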
  // Checked JNI: is the programmer required to check for exceptions; if so,
  // specify which function name. Returning to a Java frame should implicitly
  // clear the pending check; this is done for Native->Java transitions
  // (i.e. user JNI code). VM->Java transitions are not cleared; it is
  // expected that JNI code enclosed within ThreadToNativeFromVM makes proper
  // exception checks (i.e. VM internal).
  bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
  void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
  const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
  void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }

  // For deadlock detection
  int depth_first_number() { return _depth_first_number; }
  void set_depth_first_number(int dfn) { _depth_first_number = dfn; }

 private:
  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }

 public:
  MonitorChunk* monitor_chunks() const { return _monitor_chunks; }
  void add_monitor_chunk(MonitorChunk* chunk);
  void remove_monitor_chunk(MonitorChunk* chunk);
  bool in_deopt_handler() const { return _in_deopt_handler > 0; }
  void inc_in_deopt_handler() { _in_deopt_handler++; }
  void dec_in_deopt_handler() {
    assert(_in_deopt_handler > 0, "mismatched deopt nesting");
    if (_in_deopt_handler > 0) { // robustness
      _in_deopt_handler--;
    }
  }

 private:
  void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }

 public:

  // Frame iteration; calls the function f for all frames on the stack
  void frames_do(void f(frame*, const RegisterMap*));

  // Memory operations
  void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Sweeper operations
  virtual void nmethods_do(CodeBlobClosure* cf);

  // RedefineClasses Support
  void metadata_do(MetadataClosure* f);

  // Debug method asserting thread states are correct during a handshake operation.
  DEBUG_ONLY(void verify_states_for_handshake();)
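
  // Illustrative sketch (not VM code): deopt handler entry and exit
  // bracket the work so in_deopt_handler() reflects nesting. `jt` is
  // hypothetical.
  //
  //   jt->inc_in_deopt_handler();
  //   ...unpack and materialize interpreter frames...
  //   jt->dec_in_deopt_handler();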
  // Misc. operations
  char* name() const { return (char*)get_thread_name(); }
  void print_on(outputStream* st, bool print_extended_info) const;
  void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  void print_thread_state_on(outputStream*) const PRODUCT_RETURN;
  void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_name_on_error(outputStream* st, char* buf, int buflen) const;
  void verify();
  const char* get_thread_name() const;
 protected:
  // factor out low-level mechanics for use in both normal and error cases
  virtual const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
 public:
  // Accessing frames
  frame last_frame() {
    _anchor.make_walkable(this);
    return pd_last_frame();
  }
  javaVFrame* last_java_vframe(RegisterMap* reg_map);

  // Returns method at 'depth' java or native frames down the stack
  // Used for security checks
  Klass* security_get_caller_class(int depth);

  // Print stack trace in external format
  void print_stack_on(outputStream* st);
  void print_stack() { print_stack_on(tty); }

  // Print stack traces in various internal formats
  void trace_stack()                      PRODUCT_RETURN;
  void trace_stack_from(vframe* start_vf) PRODUCT_RETURN;
  void trace_frames()                     PRODUCT_RETURN;

  // Print an annotated view of the stack frames
  void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
  void validate_frame_layout() {
    print_frame_layout(0, true);
  }

  // Functions for testing deoptimization
  void deoptimize();
  void make_zombies();

  void deoptimize_marked_methods();

 public:
  // Returns the running thread as a JavaThread
  static inline JavaThread* current();

  // Returns the active Java thread. Do not use this if you know you are calling
  // from a JavaThread, as it's slower than JavaThread::current. If called from
  // the VMThread, it also returns the JavaThread that instigated the VMThread's
  // operation. You may not want that either.
  static JavaThread* active();

  inline CompilerThread* as_CompilerThread();

 protected:
  virtual void pre_run();
  virtual void run();
  void thread_main_inner();
  virtual void post_run();


 private:
  GrowableArray<oop>* _array_for_gc;
 public:

  void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }

 public:
  // Thread local information maintained by JVMTI.
  void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; }
  // A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
  // getter is used to get this JavaThread's JvmtiThreadState if it has
  // one, which means NULL can be returned. JvmtiThreadState::state_for()
  // is used to get the specified JavaThread's JvmtiThreadState if it has
  // one, or it allocates a new JvmtiThreadState for the JavaThread and
  // returns it. JvmtiThreadState::state_for() will return NULL only if
  // the specified JavaThread is exiting.
  JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; }
  static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); }
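
  // Illustrative sketch (not VM code): callers that must have a state use
  // JvmtiThreadState::state_for(), which allocates on demand, as the
  // comment above describes. `jt` is hypothetical.
  //
  //   JvmtiThreadState* state = jt->jvmti_thread_state();
  //   if (state == NULL) {
  //     state = JvmtiThreadState::state_for(jt);  // NULL only if jt is exiting
  //   }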
  // JVMTI PopFrame support
  // Setting and clearing popframe_condition
  // All of these enumerated values are bits. popframe_pending
  // indicates that a PopFrame() has been requested and not yet been
  // completed. popframe_processing indicates that the PopFrame() is in
  // the process of being completed. popframe_force_deopt_reexecution_bit
  // indicates that special handling is required when returning to a
  // deoptimized caller.
  enum PopCondition {
    popframe_inactive                    = 0x00,
    popframe_pending_bit                 = 0x01,
    popframe_processing_bit              = 0x02,
    popframe_force_deopt_reexecution_bit = 0x04
  };
  PopCondition popframe_condition() { return (PopCondition) _popframe_condition; }
  void set_popframe_condition(PopCondition c) { _popframe_condition = c; }
  void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; }
  void clear_popframe_condition() { _popframe_condition = popframe_inactive; }
  static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); }
  bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; }
  bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
  void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; }
#ifdef CC_INTERP
  bool pop_frame_pending(void) { return ((_popframe_condition & popframe_pending_bit) != 0); }
  void clr_pop_frame_pending(void) { _popframe_condition = popframe_inactive; }
  bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); }
  void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; }
  void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; }
#endif

  int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; }
  void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; }
  void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; }

 private:
  // Saved incoming arguments to popped frame.
  // Used only when popped interpreted frame returns to deoptimized frame.
  void* _popframe_preserved_args;
  int   _popframe_preserved_args_size;

 public:
  void popframe_preserve_args(ByteSize size_in_bytes, void* start);
  void* popframe_preserved_args();
  ByteSize popframe_preserved_args_size();
  WordSize popframe_preserved_args_size_in_words();
  void popframe_free_preserved_args();
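
  // Illustrative sketch (not VM code): the condition bits compose, e.g. a
  // requested pop that is now being processed. `jt` is hypothetical.
  //
  //   jt->set_popframe_condition_bit(JavaThread::popframe_pending_bit);
  //   ...
  //   jt->set_popframe_condition_bit(JavaThread::popframe_processing_bit);
  //   assert(jt->has_pending_popframe(), "pending bit is still set");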

 private:
  JvmtiThreadState *_jvmti_thread_state;

  // Used by the interpreter in fullspeed mode for frame pop, method
  // entry, method exit and single stepping support. This field is
  // only set to non-zero by the VM_EnterInterpOnlyMode VM operation.
  // It can be set to zero asynchronously (i.e., without a VM operation
  // or a lock) so we have to be very careful.
  int _interp_only_mode;

 public:
  // used by the interpreter for fullspeed debugging support (see above)
  static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
  bool is_interp_only_mode()        { return (_interp_only_mode != 0); }
  int get_interp_only_mode()        { return _interp_only_mode; }
  void increment_interp_only_mode() { ++_interp_only_mode; }
  void decrement_interp_only_mode() { --_interp_only_mode; }

  // support for cached flag that indicates whether exceptions need to be posted for this thread
  // if this is false, we can avoid deoptimizing when events are thrown
  // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything
 private:
  int _should_post_on_exceptions_flag;

 public:
  int should_post_on_exceptions_flag() { return _should_post_on_exceptions_flag; }
  void set_should_post_on_exceptions_flag(int val) { _should_post_on_exceptions_flag = val; }

 private:
  ThreadStatistics *_thread_stat;

 public:
  ThreadStatistics* get_thread_stat() const { return _thread_stat; }

  // Return a blocker object for which this thread is blocked parking.
  oop current_park_blocker();

 private:
  static size_t _stack_size_at_create;

 public:
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

  // Machine dependent stuff
#include OS_CPU_HEADER(thread)

  // JSR166 per-thread parker
 private:
  Parker* _parker;
 public:
  Parker* parker() { return _parker; }

  // Biased locking support
 private:
  GrowableArray<MonitorInfo*>* _cached_monitor_info;
 public:
  GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
  void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }

  // clearing/querying jni attach status
  bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
  bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
  inline void set_done_attaching_via_jni();
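
  // Illustrative sketch (not VM code): _interp_only_mode above is a
  // counter, so nested requests (e.g. frame pop plus single-step) each
  // bump it. `jt` is hypothetical.
  //
  //   jt->increment_interp_only_mode();
  //   ...
  //   jt->decrement_interp_only_mode();
  //   if (!jt->is_interp_only_mode()) { /* compiled code may run again */ }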
  // Stack dump assistance:
  // Track the class we want to initialize but for which we have to wait
  // on its init_lock() because it is already being initialized.
  void set_class_to_be_initialized(InstanceKlass* k);
  InstanceKlass* class_to_be_initialized() const;

 private:
  InstanceKlass* _class_to_be_initialized;

  // java.lang.Thread.sleep support
  ParkEvent* _SleepEvent;
 public:
  bool sleep(jlong millis);

  // java.lang.Thread interruption support
  void interrupt();
  bool is_interrupted(bool clear_interrupted);

};

// Inline implementation of JavaThread::current
inline JavaThread* JavaThread::current() {
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "just checking");
  return (JavaThread*)thread;
}

inline CompilerThread* JavaThread::as_CompilerThread() {
  assert(is_Compiler_thread(), "just checking");
  return (CompilerThread*)this;
}
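
// Illustrative sketch (not VM code): prefer JavaThread::current() on paths
// known to run on a JavaThread; per the comments above it is cheaper than
// JavaThread::active() and asserts the dynamic type.
//
//   JavaThread* jt = JavaThread::current();        // must be on a JavaThread
//   CompilerThread* ct = jt->as_CompilerThread();  // only if is_Compiler_thread()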

// Dedicated thread to sweep the code cache
class CodeCacheSweeperThread : public JavaThread {
  CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper
 public:
  CodeCacheSweeperThread();
  // Track the nmethod currently being scanned by the sweeper
  void set_scanned_compiled_method(CompiledMethod* cm) {
    assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
    _scanned_compiled_method = cm;
  }

  // Hide sweeper thread from external view.
  bool is_hidden_from_external_view() const { return true; }

  bool is_Code_cache_sweeper_thread() const { return true; }

  // Prevent GC from unloading _scanned_compiled_method
  void oops_do(OopClosure* f, CodeBlobClosure* cf);
  void nmethods_do(CodeBlobClosure* cf);
};

// A thread used for compilation.
class CompilerThread : public JavaThread {
  friend class VMStructs;
 private:
  CompilerCounters* _counters;

  ciEnv*                _env;
  CompileLog*           _log;
  CompileTask* volatile _task;  // print_threads_compiling can read this concurrently.
  CompileQueue*         _queue;
  BufferBlob*           _buffer_blob;

  AbstractCompiler*     _compiler;
  TimeStamp             _idle_time;

 public:

  static CompilerThread* current();

  CompilerThread(CompileQueue* queue, CompilerCounters* counters);
  ~CompilerThread();

  bool is_Compiler_thread() const { return true; }

  virtual bool can_call_java() const;

  // Hide native compiler threads from external view.
  bool is_hidden_from_external_view() const { return !can_call_java(); }

  void set_compiler(AbstractCompiler* c) { _compiler = c; }
  AbstractCompiler* compiler() const     { return _compiler; }

  CompileQueue* queue() const            { return _queue; }
  CompilerCounters* counters() const     { return _counters; }

  // Get/set the thread's compilation environment.
  ciEnv* env()                           { return _env; }
  void set_env(ciEnv* env)               { _env = env; }

  BufferBlob* get_buffer_blob() const    { return _buffer_blob; }
  void set_buffer_blob(BufferBlob* b)    { _buffer_blob = b; }

  // Get/set the thread's logging information
  CompileLog* log()                      { return _log; }
  void init_log(CompileLog* log) {
    // Set once, for good.
    assert(_log == NULL, "set only once");
    _log = log;
  }

  void start_idle_timer() { _idle_time.update(); }
  jlong idle_time_millis() {
    return TimeHelper::counter_to_millis(_idle_time.ticks_since_update());
  }

#ifndef PRODUCT
 private:
  IdealGraphPrinter* _ideal_graph_printer;
 public:
  IdealGraphPrinter* ideal_graph_printer()           { return _ideal_graph_printer; }
  void set_ideal_graph_printer(IdealGraphPrinter* n) { _ideal_graph_printer = n; }
#endif

  // Get/set the thread's current task
  CompileTask* task()                { return _task; }
  void set_task(CompileTask* task)   { _task = task; }
};

inline CompilerThread* CompilerThread::current() {
  return JavaThread::current()->as_CompilerThread();
}

// The active thread queue. It also keeps track of the currently used
// thread priorities.
class Threads: AllStatic {
  friend class VMStructs;
 private:
  static int   _number_of_threads;
  static int   _number_of_non_daemon_threads;
  static int   _return_code;
  static uintx _thread_claim_token;
#ifdef ASSERT
  static bool  _vm_complete;
#endif

  static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
  static void initialize_jsr292_core_classes(TRAPS);

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p, bool is_daemon);
  static void non_java_threads_do(ThreadClosure* tc);
  static void java_threads_do(ThreadClosure* tc);
  static void java_threads_and_vm_thread_do(ThreadClosure* tc);
  static void threads_do(ThreadClosure* tc);
  static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc);

  // Initializes the vm and creates the vm thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // The "thread claim token" provides a way for threads to be claimed
  // by parallel worker tasks.
  //
  // Each thread contains a "token" field. A task will claim the
  // thread only if its token is different from the global token,
  // which is updated by calling change_thread_claim_token(). When
  // a thread is claimed, its token is set to the global token value
  // so other threads in the same iteration pass won't claim it.
  //
  // For this to work, change_thread_claim_token() needs to be called
  // exactly once in sequential code before starting parallel tasks
  // that should claim threads.
  //
  // New threads get their token set to 0 and change_thread_claim_token()
  // never sets the global token to 0.
  static uintx thread_claim_token() { return _thread_claim_token; }
  static void change_thread_claim_token();
  static void assert_all_threads_claimed() NOT_DEBUG_RETURN;
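
  // Illustrative sketch (not VM code): the claim-token protocol described
  // above, assuming worker tasks claim each thread before processing it.
  //
  //   Threads::change_thread_claim_token();   // once, in sequential code
  //   // ...spawn workers; a worker processes a thread only after claiming
  //   // it, i.e. flipping its token to Threads::thread_claim_token()...
  //   Threads::assert_all_threads_claimed();  // debug-only postcondition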
  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f, CodeBlobClosure* cf);
  // This version may be called by sequential or parallel code.
  static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf);

  // Sweeper
  static void nmethods_do(CodeBlobClosure* cf);

  // RedefineClasses support
  static void metadata_do(MetadataClosure* f);
  static void metadata_handles_do(void f(Metadata*));

#ifdef ASSERT
  static bool is_vm_complete() { return _vm_complete; }
#endif // ASSERT

  // Verification
  static void verify();
  static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks, bool print_extended_info);
  static void print(bool print_stacks, bool internal_format) {
    // this function is only used by debug.cpp
    print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */, false /* simple format */);
  }
  static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);
  static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
                             int buflen, bool* found_current);
  static void print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form = false);

  // Get Java threads that are waiting to enter a monitor.
  static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list,
                                                         int count, address monitor);

  // Get owning Java thread from the monitor's owner field.
  static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list,
                                                      address owner);

  // Number of threads on the active threads list
  static int number_of_threads()            { return _number_of_threads; }
  // Number of non-daemon threads on the active threads list
  static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; }

  // Deoptimizes all frames tied to marked nmethods
  static void deoptimized_wrt_marked_nmethods();

  struct Test; // For private gtest access.
};

class SignalHandlerMark: public StackObj {
 private:
  Thread* _thread;
 public:
  SignalHandlerMark(Thread* t) {
    _thread = t;
    if (_thread) _thread->enter_signal_handler();
  }
  ~SignalHandlerMark() {
    if (_thread) _thread->leave_signal_handler();
    _thread = NULL;
  }
};


#endif // SHARE_RUNTIME_THREAD_HPP