1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_THREAD_HPP 26 #define SHARE_VM_RUNTIME_THREAD_HPP 27 28 #include "jni.h" 29 #include "code/compiledMethod.hpp" 30 #include "gc/shared/gcThreadLocalData.hpp" 31 #include "gc/shared/threadLocalAllocBuffer.hpp" 32 #include "memory/allocation.hpp" 33 #include "oops/oop.hpp" 34 #include "prims/jvmtiExport.hpp" 35 #include "runtime/frame.hpp" 36 #include "runtime/globals.hpp" 37 #include "runtime/handshake.hpp" 38 #include "runtime/javaFrameAnchor.hpp" 39 #include "runtime/jniHandles.hpp" 40 #include "runtime/mutexLocker.hpp" 41 #include "runtime/os.hpp" 42 #include "runtime/osThread.hpp" 43 #include "runtime/park.hpp" 44 #include "runtime/stubRoutines.hpp" 45 #include "runtime/threadHeapSampler.hpp" 46 #include "runtime/threadLocalStorage.hpp" 47 #include "runtime/threadStatisticalInfo.hpp" 48 #include "runtime/unhandledOops.hpp" 49 #include "utilities/align.hpp" 50 #include "utilities/exceptions.hpp" 51 #include "utilities/macros.hpp" 52 #ifdef ZERO 53 # include "stack_zero.hpp" 54 #endif 55 #if INCLUDE_JFR 56 #include "jfr/support/jfrThreadExtension.hpp" 57 #endif 58 59 60 class SafeThreadsListPtr; 61 class ThreadSafepointState; 62 class ThreadsList; 63 class ThreadsSMRSupport; 64 65 class JvmtiThreadState; 66 class ThreadStatistics; 67 class ConcurrentLocksDump; 68 class ParkEvent; 69 class Parker; 70 71 class ciEnv; 72 class CompileThread; 73 class CompileLog; 74 class CompileTask; 75 class CompileQueue; 76 class CompilerCounters; 77 class vframeArray; 78 79 class DeoptResourceMark; 80 class jvmtiDeferredLocalVariableSet; 81 82 class GCTaskQueue; 83 class ThreadClosure; 84 class IdealGraphPrinter; 85 86 class Metadata; 87 template <class T, MEMFLAGS F> class ChunkedList; 88 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer; 89 90 DEBUG_ONLY(class ResourceMark;) 91 92 class WorkerThread; 93 94 // Class hierarchy 95 // - Thread 96 // - JavaThread 97 // - various subclasses eg CompilerThread, ServiceThread 98 // - NonJavaThread 99 // - NamedThread 100 // - VMThread 101 // - ConcurrentGCThread 102 // - WorkerThread 103 // - GangWorker 104 // - GCTaskThread 105 // - WatcherThread 106 // - JfrThreadSampler 107 // 108 // All Thread subclasses must be either JavaThread or NonJavaThread. 109 // This means !t->is_Java_thread() iff t is a NonJavaThread, or t is 110 // a partially constructed/destroyed Thread. 111 112 class Thread: public ThreadShadow { 113 friend class VMStructs; 114 friend class JVMCIVMStructs; 115 private: 116 117 #ifndef USE_LIBRARY_BASED_TLS_ONLY 118 // Current thread is maintained as a thread-local variable 119 static THREAD_LOCAL_DECL Thread* _thr_current; 120 #endif 121 122 private: 123 // Thread local data area available to the GC. The internal 124 // structure and contents of this data area is GC-specific. 125 // Only GC and GC barrier code should access this data area. 126 GCThreadLocalData _gc_data; 127 128 public: 129 static ByteSize gc_data_offset() { 130 return byte_offset_of(Thread, _gc_data); 131 } 132 133 template <typename T> T* gc_data() { 134 STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data)); 135 return reinterpret_cast<T*>(&_gc_data); 136 } 137 138 // Exception handling 139 // (Note: _pending_exception and friends are in ThreadShadow) 140 //oop _pending_exception; // pending exception for current thread 141 // const char* _exception_file; // file information for exception (debugging only) 142 // int _exception_line; // line information for exception (debugging only) 143 protected: 144 // Support for forcing alignment of thread objects for biased locking 145 void* _real_malloc_address; 146 147 // JavaThread lifecycle support: 148 friend class SafeThreadsListPtr; // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access 149 friend class ScanHazardPtrGatherProtectedThreadsClosure; // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access 150 friend class ScanHazardPtrGatherThreadsListClosure; // for get_threads_hazard_ptr(), untag_hazard_ptr() access 151 friend class ScanHazardPtrPrintMatchingThreadsClosure; // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access 152 friend class ThreadsSMRSupport; // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access 153 154 ThreadsList* volatile _threads_hazard_ptr; 155 SafeThreadsListPtr* _threads_list_ptr; 156 ThreadsList* cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value); 157 ThreadsList* get_threads_hazard_ptr(); 158 void set_threads_hazard_ptr(ThreadsList* new_list); 159 static bool is_hazard_ptr_tagged(ThreadsList* list) { 160 return (intptr_t(list) & intptr_t(1)) == intptr_t(1); 161 } 162 static ThreadsList* tag_hazard_ptr(ThreadsList* list) { 163 return (ThreadsList*)(intptr_t(list) | intptr_t(1)); 164 } 165 static ThreadsList* untag_hazard_ptr(ThreadsList* list) { 166 return (ThreadsList*)(intptr_t(list) & ~intptr_t(1)); 167 } 168 // This field is enabled via -XX:+EnableThreadSMRStatistics: 169 uint _nested_threads_hazard_ptr_cnt; 170 void dec_nested_threads_hazard_ptr_cnt() { 171 assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()"); 172 _nested_threads_hazard_ptr_cnt--; 173 } 174 void inc_nested_threads_hazard_ptr_cnt() { 175 _nested_threads_hazard_ptr_cnt++; 176 } 177 uint nested_threads_hazard_ptr_cnt() { 178 return _nested_threads_hazard_ptr_cnt; 179 } 180 181 public: 182 void* operator new(size_t size) throw() { return allocate(size, true); } 183 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { 184 return allocate(size, false); } 185 void operator delete(void* p); 186 187 protected: 188 static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread); 189 private: 190 191 // *************************************************************** 192 // Suspend and resume support 193 // *************************************************************** 194 // 195 // VM suspend/resume no longer exists - it was once used for various 196 // things including safepoints but was deprecated and finally removed 197 // in Java 7. Because VM suspension was considered "internal" Java-level 198 // suspension was considered "external", and this legacy naming scheme 199 // remains. 200 // 201 // External suspend/resume requests come from JVM_SuspendThread, 202 // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI 203 // ResumeThread. External 204 // suspend requests cause _external_suspend to be set and external 205 // resume requests cause _external_suspend to be cleared. 206 // External suspend requests do not nest on top of other external 207 // suspend requests. The higher level APIs reject suspend requests 208 // for already suspended threads. 209 // 210 // The external_suspend 211 // flag is checked by has_special_runtime_exit_condition() and java thread 212 // will self-suspend when handle_special_runtime_exit_condition() is 213 // called. Most uses of the _thread_blocked state in JavaThreads are 214 // considered the same as being externally suspended; if the blocking 215 // condition lifts, the JavaThread will self-suspend. Other places 216 // where VM checks for external_suspend include: 217 // + mutex granting (do not enter monitors when thread is suspended) 218 // + state transitions from _thread_in_native 219 // 220 // In general, java_suspend() does not wait for an external suspend 221 // request to complete. When it returns, the only guarantee is that 222 // the _external_suspend field is true. 223 // 224 // wait_for_ext_suspend_completion() is used to wait for an external 225 // suspend request to complete. External suspend requests are usually 226 // followed by some other interface call that requires the thread to 227 // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into 228 // the interface that requires quiescence, we give the JavaThread a 229 // chance to self-suspend before we need it to be quiescent. This 230 // improves overall suspend/query performance. 231 // 232 // _suspend_flags controls the behavior of java_ suspend/resume. 233 // It must be set under the protection of SR_lock. Read from the flag is 234 // OK without SR_lock as long as the value is only used as a hint. 235 // (e.g., check _external_suspend first without lock and then recheck 236 // inside SR_lock and finish the suspension) 237 // 238 // _suspend_flags is also overloaded for other "special conditions" so 239 // that a single check indicates whether any special action is needed 240 // eg. for async exceptions. 241 // ------------------------------------------------------------------- 242 // Notes: 243 // 1. The suspend/resume logic no longer uses ThreadState in OSThread 244 // but we still update its value to keep other part of the system (mainly 245 // JVMTI) happy. ThreadState is legacy code (see notes in 246 // osThread.hpp). 247 // 248 // 2. It would be more natural if set_external_suspend() is private and 249 // part of java_suspend(), but that probably would affect the suspend/query 250 // performance. Need more investigation on this. 251 252 // suspend/resume lock: used for self-suspend 253 Monitor* _SR_lock; 254 255 protected: 256 enum SuspendFlags { 257 // NOTE: avoid using the sign-bit as cc generates different test code 258 // when the sign-bit is used, and sometimes incorrectly - see CR 6398077 259 260 _external_suspend = 0x20000000U, // thread is asked to self suspend 261 _ext_suspended = 0x40000000U, // thread has self-suspended 262 _deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt 263 264 _has_async_exception = 0x00000001U, // there is a pending async exception 265 _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock 266 267 _trace_flag = 0x00000004U // call tracing backend 268 }; 269 270 // various suspension related flags - atomically updated 271 // overloaded for async exception checking in check_special_condition_for_native_trans. 272 volatile uint32_t _suspend_flags; 273 274 private: 275 int _num_nested_signal; 276 277 DEBUG_ONLY(bool _suspendible_thread;) 278 279 public: 280 void enter_signal_handler() { _num_nested_signal++; } 281 void leave_signal_handler() { _num_nested_signal--; } 282 bool is_inside_signal_handler() const { return _num_nested_signal > 0; } 283 284 // Determines if a heap allocation failure will be retried 285 // (e.g., by deoptimizing and re-executing in the interpreter). 286 // In this case, the failed allocation must raise 287 // Universe::out_of_memory_error_retry() and omit side effects 288 // such as JVMTI events and handling -XX:+HeapDumpOnOutOfMemoryError 289 // and -XX:OnOutOfMemoryError. 290 virtual bool in_retryable_allocation() const { return false; } 291 292 #ifdef ASSERT 293 void set_suspendible_thread() { 294 _suspendible_thread = true; 295 } 296 297 void clear_suspendible_thread() { 298 _suspendible_thread = false; 299 } 300 301 bool is_suspendible_thread() { return _suspendible_thread; } 302 #endif 303 304 private: 305 // Active_handles points to a block of handles 306 JNIHandleBlock* _active_handles; 307 308 // One-element thread local free list 309 JNIHandleBlock* _free_handle_block; 310 311 // Point to the last handle mark 312 HandleMark* _last_handle_mark; 313 314 // The parity of the last strong_roots iteration in which this thread was 315 // claimed as a task. 316 int _oops_do_parity; 317 318 // Support for GlobalCounter 319 private: 320 volatile uintx _rcu_counter; 321 public: 322 volatile uintx* get_rcu_counter() { 323 return &_rcu_counter; 324 } 325 326 public: 327 void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; } 328 HandleMark* last_handle_mark() const { return _last_handle_mark; } 329 private: 330 331 #ifdef ASSERT 332 void* _missed_ic_stub_refill_mark; 333 334 public: 335 void* missed_ic_stub_refill_mark() { 336 return _missed_ic_stub_refill_mark; 337 } 338 339 void set_missed_ic_stub_refill_mark(void* mark) { 340 _missed_ic_stub_refill_mark = mark; 341 } 342 #endif 343 344 private: 345 346 // debug support for checking if code does allow safepoints or not 347 // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on 348 // mutex, or blocking on an object synchronizer (Java locking). 349 // If !allow_safepoint(), then an assertion failure will happen in any of the above cases 350 // If !allow_allocation(), then an assertion failure will happen during allocation 351 // (Hence, !allow_safepoint() => !allow_allocation()). 352 // 353 // The two classes NoSafepointVerifier and No_Allocation_Verifier are used to set these counters. 354 // 355 NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen 356 debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops. 357 358 // Used by SkipGCALot class. 359 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot? 360 361 friend class NoAllocVerifier; 362 friend class NoSafepointVerifier; 363 friend class PauseNoSafepointVerifier; 364 friend class GCLocker; 365 366 volatile void* _polling_page; // Thread local polling page 367 368 ThreadLocalAllocBuffer _tlab; // Thread-local eden 369 jlong _allocated_bytes; // Cumulative number of bytes allocated on 370 // the Java heap 371 ThreadHeapSampler _heap_sampler; // For use when sampling the memory. 372 373 ThreadStatisticalInfo _statistical_info; // Statistics about the thread 374 375 JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;) // Thread-local data for jfr 376 377 int _vm_operation_started_count; // VM_Operation support 378 int _vm_operation_completed_count; // VM_Operation support 379 380 ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread 381 // is waiting to lock 382 bool _current_pending_monitor_is_from_java; // locking is from Java code 383 384 // ObjectMonitor on which this thread called Object.wait() 385 ObjectMonitor* _current_waiting_monitor; 386 387 // Private thread-local objectmonitor list - a simple cache organized as a SLL. 388 public: 389 ObjectMonitor* omFreeList; 390 int omFreeCount; // length of omFreeList 391 int omFreeProvision; // reload chunk size 392 ObjectMonitor* omInUseList; // SLL to track monitors in circulation 393 int omInUseCount; // length of omInUseList 394 395 #ifdef ASSERT 396 private: 397 bool _visited_for_critical_count; 398 399 public: 400 void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; } 401 bool was_visited_for_critical_count() const { return _visited_for_critical_count; } 402 #endif 403 404 public: 405 enum { 406 is_definitely_current_thread = true 407 }; 408 409 // Constructor 410 Thread(); 411 virtual ~Thread() = 0; // Thread is abstract. 412 413 // Manage Thread::current() 414 void initialize_thread_current(); 415 static void clear_thread_current(); // TLS cleanup needed before threads terminate 416 417 protected: 418 // To be implemented by children. 419 virtual void run() = 0; 420 421 public: 422 // invokes <ChildThreadClass>::run(), with common preparations and cleanups. 423 void call_run(); 424 425 // Testers 426 virtual bool is_VM_thread() const { return false; } 427 virtual bool is_Java_thread() const { return false; } 428 virtual bool is_Compiler_thread() const { return false; } 429 virtual bool is_Code_cache_sweeper_thread() const { return false; } 430 virtual bool is_hidden_from_external_view() const { return false; } 431 virtual bool is_jvmti_agent_thread() const { return false; } 432 // True iff the thread can perform GC operations at a safepoint. 433 // Generally will be true only of VM thread and parallel GC WorkGang 434 // threads. 435 virtual bool is_GC_task_thread() const { return false; } 436 virtual bool is_Watcher_thread() const { return false; } 437 virtual bool is_ConcurrentGC_thread() const { return false; } 438 virtual bool is_Named_thread() const { return false; } 439 virtual bool is_Worker_thread() const { return false; } 440 441 // Can this thread make Java upcalls 442 virtual bool can_call_java() const { return false; } 443 444 // Casts 445 virtual WorkerThread* as_Worker_thread() const { return NULL; } 446 447 virtual char* name() const { return (char*)"Unknown thread"; } 448 449 // Returns the current thread (ASSERTS if NULL) 450 static inline Thread* current(); 451 // Returns the current thread, or NULL if not attached 452 static inline Thread* current_or_null(); 453 // Returns the current thread, or NULL if not attached, and is 454 // safe for use from signal-handlers 455 static inline Thread* current_or_null_safe(); 456 457 // Common thread operations 458 #ifdef ASSERT 459 static void check_for_dangling_thread_pointer(Thread *thread); 460 #endif 461 static void set_priority(Thread* thread, ThreadPriority priority); 462 static ThreadPriority get_priority(const Thread* const thread); 463 static void start(Thread* thread); 464 static void interrupt(Thread* thr); 465 static bool is_interrupted(Thread* thr, bool clear_interrupted); 466 467 void set_native_thread_name(const char *name) { 468 assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread"); 469 os::set_native_thread_name(name); 470 } 471 472 ObjectMonitor** omInUseList_addr() { return (ObjectMonitor **)&omInUseList; } 473 Monitor* SR_lock() const { return _SR_lock; } 474 475 bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; } 476 477 inline void set_suspend_flag(SuspendFlags f); 478 inline void clear_suspend_flag(SuspendFlags f); 479 480 inline void set_has_async_exception(); 481 inline void clear_has_async_exception(); 482 483 bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; } 484 485 inline void set_critical_native_unlock(); 486 inline void clear_critical_native_unlock(); 487 488 inline void set_trace_flag(); 489 inline void clear_trace_flag(); 490 491 // Support for Unhandled Oop detection 492 // Add the field for both, fastdebug and debug, builds to keep 493 // Thread's fields layout the same. 494 // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug build. 495 #ifdef CHECK_UNHANDLED_OOPS 496 private: 497 UnhandledOops* _unhandled_oops; 498 #elif defined(ASSERT) 499 private: 500 void* _unhandled_oops; 501 #endif 502 #ifdef CHECK_UNHANDLED_OOPS 503 public: 504 UnhandledOops* unhandled_oops() { return _unhandled_oops; } 505 // Mark oop safe for gc. It may be stack allocated but won't move. 506 void allow_unhandled_oop(oop *op) { 507 if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op); 508 } 509 // Clear oops at safepoint so crashes point to unhandled oop violator 510 void clear_unhandled_oops() { 511 if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops(); 512 } 513 #endif // CHECK_UNHANDLED_OOPS 514 515 public: 516 #ifndef PRODUCT 517 bool skip_gcalot() { return _skip_gcalot; } 518 void set_skip_gcalot(bool v) { _skip_gcalot = v; } 519 #endif 520 521 // Installs a pending exception to be inserted later 522 static void send_async_exception(oop thread_oop, oop java_throwable); 523 524 // Resource area 525 ResourceArea* resource_area() const { return _resource_area; } 526 void set_resource_area(ResourceArea* area) { _resource_area = area; } 527 528 OSThread* osthread() const { return _osthread; } 529 void set_osthread(OSThread* thread) { _osthread = thread; } 530 531 // JNI handle support 532 JNIHandleBlock* active_handles() const { return _active_handles; } 533 void set_active_handles(JNIHandleBlock* block) { _active_handles = block; } 534 JNIHandleBlock* free_handle_block() const { return _free_handle_block; } 535 void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; } 536 537 // Internal handle support 538 HandleArea* handle_area() const { return _handle_area; } 539 void set_handle_area(HandleArea* area) { _handle_area = area; } 540 541 GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; } 542 void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; } 543 544 // Thread-Local Allocation Buffer (TLAB) support 545 ThreadLocalAllocBuffer& tlab() { return _tlab; } 546 void initialize_tlab() { 547 if (UseTLAB) { 548 tlab().initialize(); 549 } 550 } 551 552 jlong allocated_bytes() { return _allocated_bytes; } 553 void set_allocated_bytes(jlong value) { _allocated_bytes = value; } 554 void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } 555 inline jlong cooked_allocated_bytes(); 556 557 ThreadHeapSampler& heap_sampler() { return _heap_sampler; } 558 559 ThreadStatisticalInfo& statistical_info() { return _statistical_info; } 560 561 JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;) 562 563 bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; } 564 565 // VM operation support 566 int vm_operation_ticket() { return ++_vm_operation_started_count; } 567 int vm_operation_completed_count() { return _vm_operation_completed_count; } 568 void increment_vm_operation_completed_count() { _vm_operation_completed_count++; } 569 570 // For tracking the heavyweight monitor the thread is pending on. 571 ObjectMonitor* current_pending_monitor() { 572 return _current_pending_monitor; 573 } 574 void set_current_pending_monitor(ObjectMonitor* monitor) { 575 _current_pending_monitor = monitor; 576 } 577 void set_current_pending_monitor_is_from_java(bool from_java) { 578 _current_pending_monitor_is_from_java = from_java; 579 } 580 bool current_pending_monitor_is_from_java() { 581 return _current_pending_monitor_is_from_java; 582 } 583 584 // For tracking the ObjectMonitor on which this thread called Object.wait() 585 ObjectMonitor* current_waiting_monitor() { 586 return _current_waiting_monitor; 587 } 588 void set_current_waiting_monitor(ObjectMonitor* monitor) { 589 _current_waiting_monitor = monitor; 590 } 591 592 // GC support 593 // Apply "f->do_oop" to all root oops in "this". 594 // Used by JavaThread::oops_do. 595 // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames 596 virtual void oops_do(OopClosure* f, CodeBlobClosure* cf); 597 598 // Handles the parallel case for the method below. 599 private: 600 bool claim_oops_do_par_case(int collection_parity); 601 public: 602 // Requires that "collection_parity" is that of the current roots 603 // iteration. If "is_par" is false, sets the parity of "this" to 604 // "collection_parity", and returns "true". If "is_par" is true, 605 // uses an atomic instruction to set the current threads parity to 606 // "collection_parity", if it is not already. Returns "true" iff the 607 // calling thread does the update, this indicates that the calling thread 608 // has claimed the thread's stack as a root groop in the current 609 // collection. 610 bool claim_oops_do(bool is_par, int collection_parity) { 611 if (!is_par) { 612 _oops_do_parity = collection_parity; 613 return true; 614 } else { 615 return claim_oops_do_par_case(collection_parity); 616 } 617 } 618 619 // jvmtiRedefineClasses support 620 void metadata_handles_do(void f(Metadata*)); 621 622 // Used by fast lock support 623 virtual bool is_lock_owned(address adr) const; 624 625 // Check if address is in the stack of the thread (not just for locks). 626 // Warning: the method can only be used on the running thread 627 bool is_in_stack(address adr) const; 628 // Check if address is in the usable part of the stack (excludes protected 629 // guard pages) 630 bool is_in_usable_stack(address adr) const; 631 632 // Sets this thread as starting thread. Returns failure if thread 633 // creation fails due to lack of memory, too many threads etc. 634 bool set_as_starting_thread(); 635 636 protected: 637 // OS data associated with the thread 638 OSThread* _osthread; // Platform-specific thread information 639 640 // Thread local resource area for temporary allocation within the VM 641 ResourceArea* _resource_area; 642 643 DEBUG_ONLY(ResourceMark* _current_resource_mark;) 644 645 // Thread local handle area for allocation of handles within the VM 646 HandleArea* _handle_area; 647 GrowableArray<Metadata*>* _metadata_handles; 648 649 // Support for stack overflow handling, get_thread, etc. 650 address _stack_base; 651 size_t _stack_size; 652 uintptr_t _self_raw_id; // used by get_thread (mutable) 653 int _lgrp_id; 654 655 volatile void** polling_page_addr() { return &_polling_page; } 656 657 public: 658 // Stack overflow support 659 address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } 660 void set_stack_base(address base) { _stack_base = base; } 661 size_t stack_size() const { return _stack_size; } 662 void set_stack_size(size_t size) { _stack_size = size; } 663 address stack_end() const { return stack_base() - stack_size(); } 664 void record_stack_base_and_size(); 665 void register_thread_stack_with_NMT() NOT_NMT_RETURN; 666 667 bool on_local_stack(address adr) const { 668 // QQQ this has knowledge of direction, ought to be a stack method 669 return (_stack_base >= adr && adr >= stack_end()); 670 } 671 672 uintptr_t self_raw_id() { return _self_raw_id; } 673 void set_self_raw_id(uintptr_t value) { _self_raw_id = value; } 674 675 int lgrp_id() const { return _lgrp_id; } 676 void set_lgrp_id(int value) { _lgrp_id = value; } 677 678 // Printing 679 void print_on(outputStream* st, bool print_extended_info) const; 680 virtual void print_on(outputStream* st) const { print_on(st, false); } 681 void print() const { print_on(tty); } 682 virtual void print_on_error(outputStream* st, char* buf, int buflen) const; 683 void print_value_on(outputStream* st) const; 684 685 // Debug-only code 686 #ifdef ASSERT 687 private: 688 // Deadlock detection support for Mutex locks. List of locks own by thread. 689 Monitor* _owned_locks; 690 // Mutex::set_owner_implementation is the only place where _owned_locks is modified, 691 // thus the friendship 692 friend class Mutex; 693 friend class Monitor; 694 695 public: 696 void print_owned_locks_on(outputStream* st) const; 697 void print_owned_locks() const { print_owned_locks_on(tty); } 698 Monitor* owned_locks() const { return _owned_locks; } 699 bool owns_locks() const { return owned_locks() != NULL; } 700 bool owns_locks_but_compiled_lock() const; 701 int oops_do_parity() const { return _oops_do_parity; } 702 703 // Deadlock detection 704 bool allow_allocation() { return _allow_allocation_count == 0; } 705 ResourceMark* current_resource_mark() { return _current_resource_mark; } 706 void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; } 707 #endif 708 709 void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN; 710 711 private: 712 volatile int _jvmti_env_iteration_count; 713 714 public: 715 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; } 716 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; } 717 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; } 718 719 // Code generation 720 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); } 721 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); } 722 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); } 723 724 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); } 725 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); } 726 727 static ByteSize polling_page_offset() { return byte_offset_of(Thread, _polling_page); } 728 729 static ByteSize tlab_start_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); } 730 static ByteSize tlab_end_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); } 731 static ByteSize tlab_top_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); } 732 static ByteSize tlab_pf_top_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); } 733 734 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); } 735 736 JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;) 737 738 public: 739 volatile intptr_t _Stalled; 740 volatile int _TypeTag; 741 ParkEvent * _ParkEvent; // for synchronized() 742 ParkEvent * _SleepEvent; // for Thread.sleep 743 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor 744 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease 745 int NativeSyncRecursion; // diagnostic 746 747 volatile int _OnTrap; // Resume-at IP delta 748 jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG 749 jint _hashStateX; // thread-specific hashCode generator state 750 jint _hashStateY; 751 jint _hashStateZ; 752 753 volatile jint rng[4]; // RNG for spin loop 754 755 // Low-level leaf-lock primitives used to implement synchronization 756 // and native monitor-mutex infrastructure. 757 // Not for general synchronization use. 758 static void SpinAcquire(volatile int * Lock, const char * Name); 759 static void SpinRelease(volatile int * Lock); 760 static void muxAcquire(volatile intptr_t * Lock, const char * Name); 761 static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev); 762 static void muxRelease(volatile intptr_t * Lock); 763 }; 764 765 // Inline implementation of Thread::current() 766 inline Thread* Thread::current() { 767 Thread* current = current_or_null(); 768 assert(current != NULL, "Thread::current() called on detached thread"); 769 return current; 770 } 771 772 inline Thread* Thread::current_or_null() { 773 #ifndef USE_LIBRARY_BASED_TLS_ONLY 774 return _thr_current; 775 #else 776 if (ThreadLocalStorage::is_initialized()) { 777 return ThreadLocalStorage::thread(); 778 } 779 return NULL; 780 #endif 781 } 782 783 inline Thread* Thread::current_or_null_safe() { 784 if (ThreadLocalStorage::is_initialized()) { 785 return ThreadLocalStorage::thread(); 786 } 787 return NULL; 788 } 789 790 class NonJavaThread: public Thread { 791 friend class VMStructs; 792 793 NonJavaThread* volatile _next; 794 795 class List; 796 static List _the_list; 797 798 public: 799 NonJavaThread(); 800 ~NonJavaThread(); 801 802 class Iterator; 803 }; 804 805 // Provides iteration over the list of NonJavaThreads. Because list 806 // management occurs in the NonJavaThread constructor and destructor, 807 // entries in the list may not be fully constructed instances of a 808 // derived class. Threads created after an iterator is constructed 809 // will not be visited by the iterator. The scope of an iterator is a 810 // critical section; there must be no safepoint checks in that scope. 811 class NonJavaThread::Iterator : public StackObj { 812 uint _protect_enter; 813 NonJavaThread* _current; 814 815 // Noncopyable. 816 Iterator(const Iterator&); 817 Iterator& operator=(const Iterator&); 818 819 public: 820 Iterator(); 821 ~Iterator(); 822 823 bool end() const { return _current == NULL; } 824 NonJavaThread* current() const { return _current; } 825 void step(); 826 }; 827 828 // Name support for threads. non-JavaThread subclasses with multiple 829 // uniquely named instances should derive from this. 830 class NamedThread: public NonJavaThread { 831 friend class VMStructs; 832 enum { 833 max_name_len = 64 834 }; 835 private: 836 char* _name; 837 // log JavaThread being processed by oops_do 838 JavaThread* _processed_thread; 839 uint _gc_id; // The current GC id when a thread takes part in GC 840 841 public: 842 NamedThread(); 843 ~NamedThread(); 844 // May only be called once per thread. 845 void set_name(const char* format, ...) ATTRIBUTE_PRINTF(2, 3); 846 void initialize_named_thread(); 847 virtual bool is_Named_thread() const { return true; } 848 virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; } 849 JavaThread *processed_thread() { return _processed_thread; } 850 void set_processed_thread(JavaThread *thread) { _processed_thread = thread; } 851 virtual void print_on(outputStream* st) const; 852 853 void set_gc_id(uint gc_id) { _gc_id = gc_id; } 854 uint gc_id() { return _gc_id; } 855 }; 856 857 // Worker threads are named and have an id of an assigned work. 858 class WorkerThread: public NamedThread { 859 private: 860 uint _id; 861 public: 862 WorkerThread() : _id(0) { } 863 virtual bool is_Worker_thread() const { return true; } 864 865 virtual WorkerThread* as_Worker_thread() const { 866 assert(is_Worker_thread(), "Dubious cast to WorkerThread*?"); 867 return (WorkerThread*) this; 868 } 869 870 void set_id(uint work_id) { _id = work_id; } 871 uint id() const { return _id; } 872 }; 873 874 // A single WatcherThread is used for simulating timer interrupts. 875 class WatcherThread: public NonJavaThread { 876 friend class VMStructs; 877 public: 878 virtual void run(); 879 880 private: 881 static WatcherThread* _watcher_thread; 882 883 static bool _startable; 884 // volatile due to at least one lock-free read 885 volatile static bool _should_terminate; 886 public: 887 enum SomeConstants { 888 delay_interval = 10 // interrupt delay in milliseconds 889 }; 890 891 // Constructor 892 WatcherThread(); 893 894 // No destruction allowed 895 ~WatcherThread() { 896 guarantee(false, "WatcherThread deletion must fix the race with VM termination"); 897 } 898 899 // Tester 900 bool is_Watcher_thread() const { return true; } 901 902 // Printing 903 char* name() const { return (char*)"VM Periodic Task Thread"; } 904 void print_on(outputStream* st) const; 905 void unpark(); 906 907 // Returns the single instance of WatcherThread 908 static WatcherThread* watcher_thread() { return _watcher_thread; } 909 910 // Create and start the single instance of WatcherThread, or stop it on shutdown 911 static void start(); 912 static void stop(); 913 // Only allow start once the VM is sufficiently initialized 914 // Otherwise the first task to enroll will trigger the start 915 static void make_startable(); 916 private: 917 int sleep() const; 918 }; 919 920 921 class CompilerThread; 922 923 typedef void (*ThreadFunction)(JavaThread*, TRAPS); 924 925 class JavaThread: public Thread { 926 friend class VMStructs; 927 friend class JVMCIVMStructs; 928 friend class WhiteBox; 929 private: 930 JavaThread* _next; // The next thread in the Threads list 931 bool _on_thread_list; // Is set when this JavaThread is added to the Threads list 932 oop _threadObj; // The Java level thread object 933 934 #ifdef ASSERT 935 private: 936 int _java_call_counter; 937 938 public: 939 int java_call_counter() { return _java_call_counter; } 940 void inc_java_call_counter() { _java_call_counter++; } 941 void dec_java_call_counter() { 942 assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper"); 943 _java_call_counter--; 944 } 945 private: // restore original namespace restriction 946 #endif // ifdef ASSERT 947 948 #ifndef PRODUCT 949 public: 950 enum { 951 jump_ring_buffer_size = 16 952 }; 953 private: // restore original namespace restriction 954 #endif 955 956 JavaFrameAnchor _anchor; // Encapsulation of current java frame and it state 957 958 ThreadFunction _entry_point; 959 960 JNIEnv _jni_environment; 961 962 // Deopt support 963 DeoptResourceMark* _deopt_mark; // Holds special ResourceMark for deoptimization 964 965 intptr_t* _must_deopt_id; // id of frame that needs to be deopted once we 966 // transition out of native 967 CompiledMethod* _deopt_nmethod; // CompiledMethod that is currently being deoptimized 968 vframeArray* _vframe_array_head; // Holds the heap of the active vframeArrays 969 vframeArray* _vframe_array_last; // Holds last vFrameArray we popped 970 // Because deoptimization is lazy we must save jvmti requests to set locals 971 // in compiled frames until we deoptimize and we have an interpreter frame. 972 // This holds the pointer to array (yeah like there might be more than one) of 973 // description of compiled vframes that have locals that need to be updated. 974 GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates; 975 976 // Handshake value for fixing 6243940. We need a place for the i2c 977 // adapter to store the callee Method*. This value is NEVER live 978 // across a gc point so it does NOT have to be gc'd 979 // The handshake is open ended since we can't be certain that it will 980 // be NULLed. This is because we rarely ever see the race and end up 981 // in handle_wrong_method which is the backend of the handshake. See 982 // code in i2c adapters and handle_wrong_method. 983 984 Method* _callee_target; 985 986 // Used to pass back results to the interpreter or generated code running Java code. 987 oop _vm_result; // oop result is GC-preserved 988 Metadata* _vm_result_2; // non-oop result 989 990 // See ReduceInitialCardMarks: this holds the precise space interval of 991 // the most recent slow path allocation for which compiled code has 992 // elided card-marks for performance along the fast-path. 993 MemRegion _deferred_card_mark; 994 995 MonitorChunk* _monitor_chunks; // Contains the off stack monitors 996 // allocated during deoptimization 997 // and by JNI_MonitorEnter/Exit 998 999 // Async. requests support 1000 enum AsyncRequests { 1001 _no_async_condition = 0, 1002 _async_exception, 1003 _async_unsafe_access_error 1004 }; 1005 AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request 1006 oop _pending_async_exception; 1007 1008 // Safepoint support 1009 public: // Expose _thread_state for SafeFetchInt() 1010 volatile JavaThreadState _thread_state; 1011 private: 1012 ThreadSafepointState* _safepoint_state; // Holds information about a thread during a safepoint 1013 address _saved_exception_pc; // Saved pc of instruction where last implicit exception happened 1014 1015 // JavaThread termination support 1016 enum TerminatedTypes { 1017 _not_terminated = 0xDEAD - 2, 1018 _thread_exiting, // JavaThread::exit() has been called for this thread 1019 _thread_terminated, // JavaThread is removed from thread list 1020 _vm_exited // JavaThread is still executing native code, but VM is terminated 1021 // only VM_Exit can set _vm_exited 1022 }; 1023 1024 // In general a JavaThread's _terminated field transitions as follows: 1025 // 1026 // _not_terminated => _thread_exiting => _thread_terminated 1027 // 1028 // _vm_exited is a special value to cover the case of a JavaThread 1029 // executing native code after the VM itself is terminated. 1030 volatile TerminatedTypes _terminated; 1031 // suspend/resume support 1032 volatile bool _suspend_equivalent; // Suspend equivalent condition 1033 jint _in_deopt_handler; // count of deoptimization 1034 // handlers thread is in 1035 volatile bool _doing_unsafe_access; // Thread may fault due to unsafe access 1036 bool _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was 1037 // never locked) when throwing an exception. Used by interpreter only. 1038 1039 // JNI attach states: 1040 enum JNIAttachStates { 1041 _not_attaching_via_jni = 1, // thread is not attaching via JNI 1042 _attaching_via_jni, // thread is attaching via JNI 1043 _attached_via_jni // thread has attached via JNI 1044 }; 1045 1046 // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni. 1047 // A native thread that is attaching via JNI starts with a value 1048 // of _attaching_via_jni and transitions to _attached_via_jni. 1049 volatile JNIAttachStates _jni_attach_state; 1050 1051 public: 1052 // State of the stack guard pages for this thread. 1053 enum StackGuardState { 1054 stack_guard_unused, // not needed 1055 stack_guard_reserved_disabled, 1056 stack_guard_yellow_reserved_disabled,// disabled (temporarily) after stack overflow 1057 stack_guard_enabled // enabled 1058 }; 1059 1060 private: 1061 1062 #if INCLUDE_JVMCI 1063 // The _pending_* fields below are used to communicate extra information 1064 // from an uncommon trap in JVMCI compiled code to the uncommon trap handler. 1065 1066 // Communicates the DeoptReason and DeoptAction of the uncommon trap 1067 int _pending_deoptimization; 1068 1069 // Specifies whether the uncommon trap is to bci 0 of a synchronized method 1070 // before the monitor has been acquired. 1071 bool _pending_monitorenter; 1072 1073 // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter 1074 bool _pending_transfer_to_interpreter; 1075 1076 // Guard for re-entrant call to JVMCIRuntime::adjust_comp_level 1077 bool _adjusting_comp_level; 1078 1079 // True if in a runtime call from compiled code that will deoptimize 1080 // and re-execute a failed heap allocation in the interpreter. 1081 bool _in_retryable_allocation; 1082 1083 // An id of a speculation that JVMCI compiled code can use to further describe and 1084 // uniquely identify the speculative optimization guarded by the uncommon trap 1085 long _pending_failed_speculation; 1086 1087 // These fields are mutually exclusive in terms of live ranges. 1088 union { 1089 // Communicates the pc at which the most recent implicit exception occurred 1090 // from the signal handler to a deoptimization stub. 1091 address _implicit_exception_pc; 1092 1093 // Communicates an alternative call target to an i2c stub from a JavaCall . 1094 address _alternate_call_target; 1095 } _jvmci; 1096 1097 // Support for high precision, thread sensitive counters in JVMCI compiled code. 1098 jlong* _jvmci_counters; 1099 1100 public: 1101 static jlong* _jvmci_old_thread_counters; 1102 static void collect_counters(typeArrayOop array); 1103 private: 1104 #endif // INCLUDE_JVMCI 1105 1106 StackGuardState _stack_guard_state; 1107 1108 // Precompute the limit of the stack as used in stack overflow checks. 1109 // We load it from here to simplify the stack overflow check in assembly. 1110 address _stack_overflow_limit; 1111 address _reserved_stack_activation; 1112 1113 // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is 1114 // used to temp. parsing values into and out of the runtime system during exception handling for compiled 1115 // code) 1116 volatile oop _exception_oop; // Exception thrown in compiled code 1117 volatile address _exception_pc; // PC where exception happened 1118 volatile address _exception_handler_pc; // PC for handler of exception 1119 volatile int _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site. 1120 1121 private: 1122 // support for JNI critical regions 1123 jint _jni_active_critical; // count of entries into JNI critical region 1124 1125 // Checked JNI: function name requires exception check 1126 char* _pending_jni_exception_check_fn; 1127 1128 // For deadlock detection. 1129 int _depth_first_number; 1130 1131 // JVMTI PopFrame support 1132 // This is set to popframe_pending to signal that top Java frame should be popped immediately 1133 int _popframe_condition; 1134 1135 // If reallocation of scalar replaced objects fails, we throw OOM 1136 // and during exception propagation, pop the top 1137 // _frames_to_pop_failed_realloc frames, the ones that reference 1138 // failed reallocations. 1139 int _frames_to_pop_failed_realloc; 1140 1141 #ifndef PRODUCT 1142 int _jmp_ring_index; 1143 struct { 1144 // We use intptr_t instead of address so debugger doesn't try and display strings 1145 intptr_t _target; 1146 intptr_t _instruction; 1147 const char* _file; 1148 int _line; 1149 } _jmp_ring[jump_ring_buffer_size]; 1150 #endif // PRODUCT 1151 1152 friend class VMThread; 1153 friend class ThreadWaitTransition; 1154 friend class VM_Exit; 1155 1156 void initialize(); // Initialized the instance variables 1157 1158 public: 1159 // Constructor 1160 JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads 1161 JavaThread(ThreadFunction entry_point, size_t stack_size = 0); 1162 ~JavaThread(); 1163 1164 #ifdef ASSERT 1165 // verify this JavaThread hasn't be published in the Threads::list yet 1166 void verify_not_published(); 1167 #endif 1168 1169 //JNI functiontable getter/setter for JVMTI jni function table interception API. 1170 void set_jni_functions(struct JNINativeInterface_* functionTable) { 1171 _jni_environment.functions = functionTable; 1172 } 1173 struct JNINativeInterface_* get_jni_functions() { 1174 return (struct JNINativeInterface_ *)_jni_environment.functions; 1175 } 1176 1177 // This function is called at thread creation to allow 1178 // platform specific thread variables to be initialized. 1179 void cache_global_variables(); 1180 1181 // Executes Shutdown.shutdown() 1182 void invoke_shutdown_hooks(); 1183 1184 // Cleanup on thread exit 1185 enum ExitType { 1186 normal_exit, 1187 jni_detach 1188 }; 1189 void exit(bool destroy_vm, ExitType exit_type = normal_exit); 1190 1191 void cleanup_failed_attach_current_thread(); 1192 1193 // Testers 1194 virtual bool is_Java_thread() const { return true; } 1195 virtual bool can_call_java() const { return true; } 1196 1197 // Thread chain operations 1198 JavaThread* next() const { return _next; } 1199 void set_next(JavaThread* p) { _next = p; } 1200 1201 // Thread oop. threadObj() can be NULL for initial JavaThread 1202 // (or for threads attached via JNI) 1203 oop threadObj() const { return _threadObj; } 1204 void set_threadObj(oop p) { _threadObj = p; } 1205 1206 ThreadPriority java_priority() const; // Read from threadObj() 1207 1208 // Prepare thread and add to priority queue. If a priority is 1209 // not specified, use the priority of the thread object. Threads_lock 1210 // must be held while this function is called. 1211 void prepare(jobject jni_thread, ThreadPriority prio=NoPriority); 1212 1213 void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; } 1214 address saved_exception_pc() { return _saved_exception_pc; } 1215 1216 1217 ThreadFunction entry_point() const { return _entry_point; } 1218 1219 // Allocates a new Java level thread object for this thread. thread_name may be NULL. 1220 void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS); 1221 1222 // Last frame anchor routines 1223 1224 JavaFrameAnchor* frame_anchor(void) { return &_anchor; } 1225 1226 // last_Java_sp 1227 bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); } 1228 intptr_t* last_Java_sp() const { return _anchor.last_Java_sp(); } 1229 1230 // last_Java_pc 1231 1232 address last_Java_pc(void) { return _anchor.last_Java_pc(); } 1233 1234 // Safepoint support 1235 #if !(defined(PPC64) || defined(AARCH64)) 1236 JavaThreadState thread_state() const { return _thread_state; } 1237 void set_thread_state(JavaThreadState s) { _thread_state = s; } 1238 #else 1239 // Use membars when accessing volatile _thread_state. See 1240 // Threads::create_vm() for size checks. 1241 inline JavaThreadState thread_state() const; 1242 inline void set_thread_state(JavaThreadState s); 1243 #endif 1244 inline ThreadSafepointState* safepoint_state() const; 1245 inline void set_safepoint_state(ThreadSafepointState* state); 1246 inline bool is_at_poll_safepoint(); 1247 1248 // JavaThread termination and lifecycle support: 1249 void smr_delete(); 1250 bool on_thread_list() const { return _on_thread_list; } 1251 void set_on_thread_list() { _on_thread_list = true; } 1252 1253 // thread has called JavaThread::exit() or is terminated 1254 bool is_exiting() const; 1255 // thread is terminated (no longer on the threads list); we compare 1256 // against the two non-terminated values so that a freed JavaThread 1257 // will also be considered terminated. 1258 bool check_is_terminated(TerminatedTypes l_terminated) const { 1259 return l_terminated != _not_terminated && l_terminated != _thread_exiting; 1260 } 1261 bool is_terminated() const; 1262 void set_terminated(TerminatedTypes t); 1263 // special for Threads::remove() which is static: 1264 void set_terminated_value(); 1265 void block_if_vm_exited(); 1266 1267 bool doing_unsafe_access() { return _doing_unsafe_access; } 1268 void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; } 1269 1270 bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; } 1271 void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; } 1272 1273 inline void set_polling_page_release(void* poll_value); 1274 inline void set_polling_page(void* poll_value); 1275 inline volatile void* get_polling_page(); 1276 1277 private: 1278 // Support for thread handshake operations 1279 HandshakeState _handshake; 1280 public: 1281 void set_handshake_operation(HandshakeOperation* op) { 1282 _handshake.set_operation(this, op); 1283 } 1284 1285 bool has_handshake() const { 1286 return _handshake.has_operation(); 1287 } 1288 1289 void handshake_process_by_self() { 1290 _handshake.process_by_self(this); 1291 } 1292 1293 void handshake_process_by_vmthread() { 1294 _handshake.process_by_vmthread(this); 1295 } 1296 1297 // Suspend/resume support for JavaThread 1298 private: 1299 inline void set_ext_suspended(); 1300 inline void clear_ext_suspended(); 1301 1302 public: 1303 void java_suspend(); 1304 void java_resume(); 1305 int java_suspend_self(); 1306 1307 void check_and_wait_while_suspended() { 1308 assert(JavaThread::current() == this, "sanity check"); 1309 1310 bool do_self_suspend; 1311 do { 1312 // were we externally suspended while we were waiting? 1313 do_self_suspend = handle_special_suspend_equivalent_condition(); 1314 if (do_self_suspend) { 1315 // don't surprise the thread that suspended us by returning 1316 java_suspend_self(); 1317 set_suspend_equivalent(); 1318 } 1319 } while (do_self_suspend); 1320 } 1321 static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread); 1322 // Check for async exception in addition to safepoint and suspend request. 1323 static void check_special_condition_for_native_trans(JavaThread *thread); 1324 1325 // Same as check_special_condition_for_native_trans but finishes the 1326 // transition into thread_in_Java mode so that it can potentially 1327 // block. 1328 static void check_special_condition_for_native_trans_and_transition(JavaThread *thread); 1329 1330 bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits); 1331 bool is_ext_suspend_completed_with_lock(uint32_t *bits) { 1332 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1333 // Warning: is_ext_suspend_completed() may temporarily drop the 1334 // SR_lock to allow the thread to reach a stable thread state if 1335 // it is currently in a transient thread state. 1336 return is_ext_suspend_completed(false /* !called_by_wait */, 1337 SuspendRetryDelay, bits); 1338 } 1339 1340 // We cannot allow wait_for_ext_suspend_completion() to run forever or 1341 // we could hang. SuspendRetryCount and SuspendRetryDelay are normally 1342 // passed as the count and delay parameters. Experiments with specific 1343 // calls to wait_for_ext_suspend_completion() can be done by passing 1344 // other values in the code. Experiments with all calls can be done 1345 // via the appropriate -XX options. 1346 bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits); 1347 1348 // test for suspend - most (all?) of these should go away 1349 bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits); 1350 1351 inline void set_external_suspend(); 1352 inline void clear_external_suspend(); 1353 1354 inline void set_deopt_suspend(); 1355 inline void clear_deopt_suspend(); 1356 bool is_deopt_suspend() { return (_suspend_flags & _deopt_suspend) != 0; } 1357 1358 bool is_external_suspend() const { 1359 return (_suspend_flags & _external_suspend) != 0; 1360 } 1361 // Whenever a thread transitions from native to vm/java it must suspend 1362 // if external|deopt suspend is present. 1363 bool is_suspend_after_native() const { 1364 return (_suspend_flags & (_external_suspend | _deopt_suspend)) != 0; 1365 } 1366 1367 // external suspend request is completed 1368 bool is_ext_suspended() const { 1369 return (_suspend_flags & _ext_suspended) != 0; 1370 } 1371 1372 bool is_external_suspend_with_lock() const { 1373 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1374 return is_external_suspend(); 1375 } 1376 1377 // Special method to handle a pending external suspend request 1378 // when a suspend equivalent condition lifts. 1379 bool handle_special_suspend_equivalent_condition() { 1380 assert(is_suspend_equivalent(), 1381 "should only be called in a suspend equivalence condition"); 1382 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1383 bool ret = is_external_suspend(); 1384 if (!ret) { 1385 // not about to self-suspend so clear suspend equivalence 1386 clear_suspend_equivalent(); 1387 } 1388 // implied else: 1389 // We have a pending external suspend request so we leave the 1390 // suspend_equivalent flag set until java_suspend_self() sets 1391 // the ext_suspended flag and clears the suspend_equivalent 1392 // flag. This insures that wait_for_ext_suspend_completion() 1393 // will return consistent values. 1394 return ret; 1395 } 1396 1397 // utility methods to see if we are doing some kind of suspension 1398 bool is_being_ext_suspended() const { 1399 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1400 return is_ext_suspended() || is_external_suspend(); 1401 } 1402 1403 bool is_suspend_equivalent() const { return _suspend_equivalent; } 1404 1405 void set_suspend_equivalent() { _suspend_equivalent = true; } 1406 void clear_suspend_equivalent() { _suspend_equivalent = false; } 1407 1408 // Thread.stop support 1409 void send_thread_stop(oop throwable); 1410 AsyncRequests clear_special_runtime_exit_condition() { 1411 AsyncRequests x = _special_runtime_exit_condition; 1412 _special_runtime_exit_condition = _no_async_condition; 1413 return x; 1414 } 1415 1416 // Are any async conditions present? 1417 bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); } 1418 1419 void check_and_handle_async_exceptions(bool check_unsafe_error = true); 1420 1421 // these next two are also used for self-suspension and async exception support 1422 void handle_special_runtime_exit_condition(bool check_asyncs = true); 1423 1424 // Return true if JavaThread has an asynchronous condition or 1425 // if external suspension is requested. 1426 bool has_special_runtime_exit_condition() { 1427 // Because we don't use is_external_suspend_with_lock 1428 // it is possible that we won't see an asynchronous external suspend 1429 // request that has just gotten started, i.e., SR_lock grabbed but 1430 // _external_suspend field change either not made yet or not visible 1431 // yet. However, this is okay because the request is asynchronous and 1432 // we will see the new flag value the next time through. It's also 1433 // possible that the external suspend request is dropped after 1434 // we have checked is_external_suspend(), we will recheck its value 1435 // under SR_lock in java_suspend_self(). 1436 return (_special_runtime_exit_condition != _no_async_condition) || 1437 is_external_suspend() || is_trace_suspend(); 1438 } 1439 1440 void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; } 1441 1442 inline void set_pending_async_exception(oop e); 1443 1444 // Fast-locking support 1445 bool is_lock_owned(address adr) const; 1446 1447 // Accessors for vframe array top 1448 // The linked list of vframe arrays are sorted on sp. This means when we 1449 // unpack the head must contain the vframe array to unpack. 1450 void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; } 1451 vframeArray* vframe_array_head() const { return _vframe_array_head; } 1452 1453 // Side structure for deferring update of java frame locals until deopt occurs 1454 GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; } 1455 void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; } 1456 1457 // These only really exist to make debugging deopt problems simpler 1458 1459 void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; } 1460 vframeArray* vframe_array_last() const { return _vframe_array_last; } 1461 1462 // The special resourceMark used during deoptimization 1463 1464 void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; } 1465 DeoptResourceMark* deopt_mark(void) { return _deopt_mark; } 1466 1467 intptr_t* must_deopt_id() { return _must_deopt_id; } 1468 void set_must_deopt_id(intptr_t* id) { _must_deopt_id = id; } 1469 void clear_must_deopt_id() { _must_deopt_id = NULL; } 1470 1471 void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; } 1472 CompiledMethod* deopt_compiled_method() { return _deopt_nmethod; } 1473 1474 Method* callee_target() const { return _callee_target; } 1475 void set_callee_target (Method* x) { _callee_target = x; } 1476 1477 // Oop results of vm runtime calls 1478 oop vm_result() const { return _vm_result; } 1479 void set_vm_result (oop x) { _vm_result = x; } 1480 1481 Metadata* vm_result_2() const { return _vm_result_2; } 1482 void set_vm_result_2 (Metadata* x) { _vm_result_2 = x; } 1483 1484 MemRegion deferred_card_mark() const { return _deferred_card_mark; } 1485 void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; } 1486 1487 #if INCLUDE_JVMCI 1488 int pending_deoptimization() const { return _pending_deoptimization; } 1489 long pending_failed_speculation() const { return _pending_failed_speculation; } 1490 bool adjusting_comp_level() const { return _adjusting_comp_level; } 1491 void set_adjusting_comp_level(bool b) { _adjusting_comp_level = b; } 1492 bool has_pending_monitorenter() const { return _pending_monitorenter; } 1493 void set_pending_monitorenter(bool b) { _pending_monitorenter = b; } 1494 void set_pending_deoptimization(int reason) { _pending_deoptimization = reason; } 1495 void set_pending_failed_speculation(long failed_speculation) { _pending_failed_speculation = failed_speculation; } 1496 void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; } 1497 void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; } 1498 void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; } 1499 1500 virtual bool in_retryable_allocation() const { return _in_retryable_allocation; } 1501 void set_in_retryable_allocation(bool b) { _in_retryable_allocation = b; } 1502 #endif // INCLUDE_JVMCI 1503 1504 // Exception handling for compiled methods 1505 oop exception_oop() const { return _exception_oop; } 1506 address exception_pc() const { return _exception_pc; } 1507 address exception_handler_pc() const { return _exception_handler_pc; } 1508 bool is_method_handle_return() const { return _is_method_handle_return == 1; } 1509 1510 void set_exception_oop(oop o) { (void)const_cast<oop&>(_exception_oop = o); } 1511 void set_exception_pc(address a) { _exception_pc = a; } 1512 void set_exception_handler_pc(address a) { _exception_handler_pc = a; } 1513 void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; } 1514 1515 void clear_exception_oop_and_pc() { 1516 set_exception_oop(NULL); 1517 set_exception_pc(NULL); 1518 } 1519 1520 // Stack overflow support 1521 // 1522 // (small addresses) 1523 // 1524 // -- <-- stack_end() --- 1525 // | | 1526 // | red pages | 1527 // | | 1528 // -- <-- stack_red_zone_base() | 1529 // | | 1530 // | guard 1531 // | yellow pages zone 1532 // | | 1533 // | | 1534 // -- <-- stack_yellow_zone_base() | 1535 // | | 1536 // | | 1537 // | reserved pages | 1538 // | | 1539 // -- <-- stack_reserved_zone_base() --- --- 1540 // /|\ shadow <-- stack_overflow_limit() (somewhere in here) 1541 // | zone 1542 // \|/ size 1543 // some untouched memory --- 1544 // 1545 // 1546 // -- 1547 // | 1548 // | shadow zone 1549 // | 1550 // -- 1551 // x frame n 1552 // -- 1553 // x frame n-1 1554 // x 1555 // -- 1556 // ... 1557 // 1558 // -- 1559 // x frame 0 1560 // -- <-- stack_base() 1561 // 1562 // (large addresses) 1563 // 1564 1565 private: 1566 // These values are derived from flags StackRedPages, StackYellowPages, 1567 // StackReservedPages and StackShadowPages. The zone size is determined 1568 // ergonomically if page_size > 4K. 1569 static size_t _stack_red_zone_size; 1570 static size_t _stack_yellow_zone_size; 1571 static size_t _stack_reserved_zone_size; 1572 static size_t _stack_shadow_zone_size; 1573 public: 1574 inline size_t stack_available(address cur_sp); 1575 1576 static size_t stack_red_zone_size() { 1577 assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized."); 1578 return _stack_red_zone_size; 1579 } 1580 static void set_stack_red_zone_size(size_t s) { 1581 assert(is_aligned(s, os::vm_page_size()), 1582 "We can not protect if the red zone size is not page aligned."); 1583 assert(_stack_red_zone_size == 0, "This should be called only once."); 1584 _stack_red_zone_size = s; 1585 } 1586 address stack_red_zone_base() { 1587 return (address)(stack_end() + stack_red_zone_size()); 1588 } 1589 bool in_stack_red_zone(address a) { 1590 return a <= stack_red_zone_base() && a >= stack_end(); 1591 } 1592 1593 static size_t stack_yellow_zone_size() { 1594 assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized."); 1595 return _stack_yellow_zone_size; 1596 } 1597 static void set_stack_yellow_zone_size(size_t s) { 1598 assert(is_aligned(s, os::vm_page_size()), 1599 "We can not protect if the yellow zone size is not page aligned."); 1600 assert(_stack_yellow_zone_size == 0, "This should be called only once."); 1601 _stack_yellow_zone_size = s; 1602 } 1603 1604 static size_t stack_reserved_zone_size() { 1605 // _stack_reserved_zone_size may be 0. This indicates the feature is off. 1606 return _stack_reserved_zone_size; 1607 } 1608 static void set_stack_reserved_zone_size(size_t s) { 1609 assert(is_aligned(s, os::vm_page_size()), 1610 "We can not protect if the reserved zone size is not page aligned."); 1611 assert(_stack_reserved_zone_size == 0, "This should be called only once."); 1612 _stack_reserved_zone_size = s; 1613 } 1614 address stack_reserved_zone_base() { 1615 return (address)(stack_end() + 1616 (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size())); 1617 } 1618 bool in_stack_reserved_zone(address a) { 1619 return (a <= stack_reserved_zone_base()) && 1620 (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size())); 1621 } 1622 1623 static size_t stack_yellow_reserved_zone_size() { 1624 return _stack_yellow_zone_size + _stack_reserved_zone_size; 1625 } 1626 bool in_stack_yellow_reserved_zone(address a) { 1627 return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base()); 1628 } 1629 1630 // Size of red + yellow + reserved zones. 1631 static size_t stack_guard_zone_size() { 1632 return stack_red_zone_size() + stack_yellow_reserved_zone_size(); 1633 } 1634 1635 static size_t stack_shadow_zone_size() { 1636 assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized."); 1637 return _stack_shadow_zone_size; 1638 } 1639 static void set_stack_shadow_zone_size(size_t s) { 1640 // The shadow area is not allocated or protected, so 1641 // it needs not be page aligned. 1642 // But the stack bang currently assumes that it is a 1643 // multiple of page size. This guarantees that the bang 1644 // loop touches all pages in the shadow zone. 1645 // This can be guaranteed differently, as well. E.g., if 1646 // the page size is a multiple of 4K, banging in 4K steps 1647 // suffices to touch all pages. (Some pages are banged 1648 // several times, though.) 1649 assert(is_aligned(s, os::vm_page_size()), 1650 "Stack bang assumes multiple of page size."); 1651 assert(_stack_shadow_zone_size == 0, "This should be called only once."); 1652 _stack_shadow_zone_size = s; 1653 } 1654 1655 void create_stack_guard_pages(); 1656 void remove_stack_guard_pages(); 1657 1658 void enable_stack_reserved_zone(); 1659 void disable_stack_reserved_zone(); 1660 void enable_stack_yellow_reserved_zone(); 1661 void disable_stack_yellow_reserved_zone(); 1662 void enable_stack_red_zone(); 1663 void disable_stack_red_zone(); 1664 1665 inline bool stack_guard_zone_unused(); 1666 inline bool stack_yellow_reserved_zone_disabled(); 1667 inline bool stack_reserved_zone_disabled(); 1668 inline bool stack_guards_enabled(); 1669 1670 address reserved_stack_activation() const { return _reserved_stack_activation; } 1671 void set_reserved_stack_activation(address addr) { 1672 assert(_reserved_stack_activation == stack_base() 1673 || _reserved_stack_activation == NULL 1674 || addr == stack_base(), "Must not be set twice"); 1675 _reserved_stack_activation = addr; 1676 } 1677 1678 // Attempt to reguard the stack after a stack overflow may have occurred. 1679 // Returns true if (a) guard pages are not needed on this thread, (b) the 1680 // pages are already guarded, or (c) the pages were successfully reguarded. 1681 // Returns false if there is not enough stack space to reguard the pages, in 1682 // which case the caller should unwind a frame and try again. The argument 1683 // should be the caller's (approximate) sp. 1684 bool reguard_stack(address cur_sp); 1685 // Similar to above but see if current stackpoint is out of the guard area 1686 // and reguard if possible. 1687 bool reguard_stack(void); 1688 1689 address stack_overflow_limit() { return _stack_overflow_limit; } 1690 void set_stack_overflow_limit() { 1691 _stack_overflow_limit = 1692 stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size()); 1693 } 1694 1695 // Misc. accessors/mutators 1696 void set_do_not_unlock(void) { _do_not_unlock_if_synchronized = true; } 1697 void clr_do_not_unlock(void) { _do_not_unlock_if_synchronized = false; } 1698 bool do_not_unlock(void) { return _do_not_unlock_if_synchronized; } 1699 1700 #ifndef PRODUCT 1701 void record_jump(address target, address instr, const char* file, int line); 1702 #endif // PRODUCT 1703 1704 // For assembly stub generation 1705 static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); } 1706 #ifndef PRODUCT 1707 static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index); } 1708 static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring); } 1709 #endif // PRODUCT 1710 static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); } 1711 static ByteSize pending_jni_exception_check_fn_offset() { 1712 return byte_offset_of(JavaThread, _pending_jni_exception_check_fn); 1713 } 1714 static ByteSize last_Java_sp_offset() { 1715 return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset(); 1716 } 1717 static ByteSize last_Java_pc_offset() { 1718 return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset(); 1719 } 1720 static ByteSize frame_anchor_offset() { 1721 return byte_offset_of(JavaThread, _anchor); 1722 } 1723 static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target); } 1724 static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result); } 1725 static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); } 1726 static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); } 1727 static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); } 1728 static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); } 1729 #if INCLUDE_JVMCI 1730 static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); } 1731 static ByteSize pending_monitorenter_offset() { return byte_offset_of(JavaThread, _pending_monitorenter); } 1732 static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); } 1733 static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); } 1734 static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); } 1735 static ByteSize jvmci_counters_offset() { return byte_offset_of(JavaThread, _jvmci_counters); } 1736 #endif // INCLUDE_JVMCI 1737 static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); } 1738 static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); } 1739 static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); } 1740 static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); } 1741 static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); } 1742 static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); } 1743 static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); } 1744 static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); } 1745 1746 static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } 1747 static ByteSize should_post_on_exceptions_flag_offset() { 1748 return byte_offset_of(JavaThread, _should_post_on_exceptions_flag); 1749 } 1750 1751 // Returns the jni environment for this thread 1752 JNIEnv* jni_environment() { return &_jni_environment; } 1753 1754 static JavaThread* thread_from_jni_environment(JNIEnv* env) { 1755 JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset())); 1756 // Only return NULL if thread is off the thread list; starting to 1757 // exit should not return NULL. 1758 if (thread_from_jni_env->is_terminated()) { 1759 thread_from_jni_env->block_if_vm_exited(); 1760 return NULL; 1761 } else { 1762 return thread_from_jni_env; 1763 } 1764 } 1765 1766 // JNI critical regions. These can nest. 1767 bool in_critical() { return _jni_active_critical > 0; } 1768 bool in_last_critical() { return _jni_active_critical == 1; } 1769 inline void enter_critical(); 1770 void exit_critical() { 1771 assert(Thread::current() == this, "this must be current thread"); 1772 _jni_active_critical--; 1773 assert(_jni_active_critical >= 0, "JNI critical nesting problem?"); 1774 } 1775 1776 // Checked JNI: is the programmer required to check for exceptions, if so specify 1777 // which function name. Returning to a Java frame should implicitly clear the 1778 // pending check, this is done for Native->Java transitions (i.e. user JNI code). 1779 // VM->Java transistions are not cleared, it is expected that JNI code enclosed 1780 // within ThreadToNativeFromVM makes proper exception checks (i.e. VM internal). 1781 bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; } 1782 void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; } 1783 const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; } 1784 void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; } 1785 1786 // For deadlock detection 1787 int depth_first_number() { return _depth_first_number; } 1788 void set_depth_first_number(int dfn) { _depth_first_number = dfn; } 1789 1790 private: 1791 void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; } 1792 1793 public: 1794 MonitorChunk* monitor_chunks() const { return _monitor_chunks; } 1795 void add_monitor_chunk(MonitorChunk* chunk); 1796 void remove_monitor_chunk(MonitorChunk* chunk); 1797 bool in_deopt_handler() const { return _in_deopt_handler > 0; } 1798 void inc_in_deopt_handler() { _in_deopt_handler++; } 1799 void dec_in_deopt_handler() { 1800 assert(_in_deopt_handler > 0, "mismatched deopt nesting"); 1801 if (_in_deopt_handler > 0) { // robustness 1802 _in_deopt_handler--; 1803 } 1804 } 1805 1806 private: 1807 void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; } 1808 1809 public: 1810 1811 // Frame iteration; calls the function f for all frames on the stack 1812 void frames_do(void f(frame*, const RegisterMap*)); 1813 1814 // Memory operations 1815 void oops_do(OopClosure* f, CodeBlobClosure* cf); 1816 1817 // Sweeper operations 1818 virtual void nmethods_do(CodeBlobClosure* cf); 1819 1820 // RedefineClasses Support 1821 void metadata_do(void f(Metadata*)); 1822 1823 // Misc. operations 1824 char* name() const { return (char*)get_thread_name(); } 1825 void print_on(outputStream* st, bool print_extended_info) const; 1826 void print_on(outputStream* st) const { print_on(st, false); } 1827 void print_value(); 1828 void print_thread_state_on(outputStream*) const PRODUCT_RETURN; 1829 void print_thread_state() const PRODUCT_RETURN; 1830 void print_on_error(outputStream* st, char* buf, int buflen) const; 1831 void print_name_on_error(outputStream* st, char* buf, int buflen) const; 1832 void verify(); 1833 const char* get_thread_name() const; 1834 private: 1835 // factor out low-level mechanics for use in both normal and error cases 1836 const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const; 1837 public: 1838 const char* get_threadgroup_name() const; 1839 const char* get_parent_name() const; 1840 1841 // Accessing frames 1842 frame last_frame() { 1843 _anchor.make_walkable(this); 1844 return pd_last_frame(); 1845 } 1846 javaVFrame* last_java_vframe(RegisterMap* reg_map); 1847 1848 // Returns method at 'depth' java or native frames down the stack 1849 // Used for security checks 1850 Klass* security_get_caller_class(int depth); 1851 1852 // Print stack trace in external format 1853 void print_stack_on(outputStream* st); 1854 void print_stack() { print_stack_on(tty); } 1855 1856 // Print stack traces in various internal formats 1857 void trace_stack() PRODUCT_RETURN; 1858 void trace_stack_from(vframe* start_vf) PRODUCT_RETURN; 1859 void trace_frames() PRODUCT_RETURN; 1860 void trace_oops() PRODUCT_RETURN; 1861 1862 // Print an annotated view of the stack frames 1863 void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN; 1864 void validate_frame_layout() { 1865 print_frame_layout(0, true); 1866 } 1867 1868 // Returns the number of stack frames on the stack 1869 int depth() const; 1870 1871 // Function for testing deoptimization 1872 void deoptimize(); 1873 void make_zombies(); 1874 1875 void deoptimized_wrt_marked_nmethods(); 1876 1877 public: 1878 // Returns the running thread as a JavaThread 1879 static inline JavaThread* current(); 1880 1881 // Returns the active Java thread. Do not use this if you know you are calling 1882 // from a JavaThread, as it's slower than JavaThread::current. If called from 1883 // the VMThread, it also returns the JavaThread that instigated the VMThread's 1884 // operation. You may not want that either. 1885 static JavaThread* active(); 1886 1887 inline CompilerThread* as_CompilerThread(); 1888 1889 public: 1890 virtual void run(); 1891 void thread_main_inner(); 1892 1893 private: 1894 GrowableArray<oop>* _array_for_gc; 1895 public: 1896 1897 void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; } 1898 1899 public: 1900 // Thread local information maintained by JVMTI. 1901 void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; } 1902 // A JvmtiThreadState is lazily allocated. This jvmti_thread_state() 1903 // getter is used to get this JavaThread's JvmtiThreadState if it has 1904 // one which means NULL can be returned. JvmtiThreadState::state_for() 1905 // is used to get the specified JavaThread's JvmtiThreadState if it has 1906 // one or it allocates a new JvmtiThreadState for the JavaThread and 1907 // returns it. JvmtiThreadState::state_for() will return NULL only if 1908 // the specified JavaThread is exiting. 1909 JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; } 1910 static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); } 1911 1912 // JVMTI PopFrame support 1913 // Setting and clearing popframe_condition 1914 // All of these enumerated values are bits. popframe_pending 1915 // indicates that a PopFrame() has been requested and not yet been 1916 // completed. popframe_processing indicates that that PopFrame() is in 1917 // the process of being completed. popframe_force_deopt_reexecution_bit 1918 // indicates that special handling is required when returning to a 1919 // deoptimized caller. 1920 enum PopCondition { 1921 popframe_inactive = 0x00, 1922 popframe_pending_bit = 0x01, 1923 popframe_processing_bit = 0x02, 1924 popframe_force_deopt_reexecution_bit = 0x04 1925 }; 1926 PopCondition popframe_condition() { return (PopCondition) _popframe_condition; } 1927 void set_popframe_condition(PopCondition c) { _popframe_condition = c; } 1928 void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; } 1929 void clear_popframe_condition() { _popframe_condition = popframe_inactive; } 1930 static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); } 1931 bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; } 1932 bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; } 1933 void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; } 1934 #ifdef CC_INTERP 1935 bool pop_frame_pending(void) { return ((_popframe_condition & popframe_pending_bit) != 0); } 1936 void clr_pop_frame_pending(void) { _popframe_condition = popframe_inactive; } 1937 bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); } 1938 void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; } 1939 void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } 1940 #endif 1941 1942 int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; } 1943 void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; } 1944 void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; } 1945 1946 private: 1947 // Saved incoming arguments to popped frame. 1948 // Used only when popped interpreted frame returns to deoptimized frame. 1949 void* _popframe_preserved_args; 1950 int _popframe_preserved_args_size; 1951 1952 public: 1953 void popframe_preserve_args(ByteSize size_in_bytes, void* start); 1954 void* popframe_preserved_args(); 1955 ByteSize popframe_preserved_args_size(); 1956 WordSize popframe_preserved_args_size_in_words(); 1957 void popframe_free_preserved_args(); 1958 1959 1960 private: 1961 JvmtiThreadState *_jvmti_thread_state; 1962 1963 // Used by the interpreter in fullspeed mode for frame pop, method 1964 // entry, method exit and single stepping support. This field is 1965 // only set to non-zero by the VM_EnterInterpOnlyMode VM operation. 1966 // It can be set to zero asynchronously (i.e., without a VM operation 1967 // or a lock) so we have to be very careful. 1968 int _interp_only_mode; 1969 1970 public: 1971 // used by the interpreter for fullspeed debugging support (see above) 1972 static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); } 1973 bool is_interp_only_mode() { return (_interp_only_mode != 0); } 1974 int get_interp_only_mode() { return _interp_only_mode; } 1975 void increment_interp_only_mode() { ++_interp_only_mode; } 1976 void decrement_interp_only_mode() { --_interp_only_mode; } 1977 1978 // support for cached flag that indicates whether exceptions need to be posted for this thread 1979 // if this is false, we can avoid deoptimizing when events are thrown 1980 // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything 1981 private: 1982 int _should_post_on_exceptions_flag; 1983 1984 public: 1985 int should_post_on_exceptions_flag() { return _should_post_on_exceptions_flag; } 1986 void set_should_post_on_exceptions_flag(int val) { _should_post_on_exceptions_flag = val; } 1987 1988 private: 1989 ThreadStatistics *_thread_stat; 1990 1991 public: 1992 ThreadStatistics* get_thread_stat() const { return _thread_stat; } 1993 1994 // Return a blocker object for which this thread is blocked parking. 1995 oop current_park_blocker(); 1996 1997 private: 1998 static size_t _stack_size_at_create; 1999 2000 public: 2001 static inline size_t stack_size_at_create(void) { 2002 return _stack_size_at_create; 2003 } 2004 static inline void set_stack_size_at_create(size_t value) { 2005 _stack_size_at_create = value; 2006 } 2007 2008 // Machine dependent stuff 2009 #include OS_CPU_HEADER(thread) 2010 2011 public: 2012 void set_blocked_on_compilation(bool value) { 2013 _blocked_on_compilation = value; 2014 } 2015 2016 bool blocked_on_compilation() { 2017 return _blocked_on_compilation; 2018 } 2019 protected: 2020 bool _blocked_on_compilation; 2021 2022 2023 // JSR166 per-thread parker 2024 private: 2025 Parker* _parker; 2026 public: 2027 Parker* parker() { return _parker; } 2028 2029 // Biased locking support 2030 private: 2031 GrowableArray<MonitorInfo*>* _cached_monitor_info; 2032 public: 2033 GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; } 2034 void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; } 2035 2036 // clearing/querying jni attach status 2037 bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; } 2038 bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; } 2039 inline void set_done_attaching_via_jni(); 2040 }; 2041 2042 // Inline implementation of JavaThread::current 2043 inline JavaThread* JavaThread::current() { 2044 Thread* thread = Thread::current(); 2045 assert(thread->is_Java_thread(), "just checking"); 2046 return (JavaThread*)thread; 2047 } 2048 2049 inline CompilerThread* JavaThread::as_CompilerThread() { 2050 assert(is_Compiler_thread(), "just checking"); 2051 return (CompilerThread*)this; 2052 } 2053 2054 // Dedicated thread to sweep the code cache 2055 class CodeCacheSweeperThread : public JavaThread { 2056 CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper 2057 public: 2058 CodeCacheSweeperThread(); 2059 // Track the nmethod currently being scanned by the sweeper 2060 void set_scanned_compiled_method(CompiledMethod* cm) { 2061 assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value"); 2062 _scanned_compiled_method = cm; 2063 } 2064 2065 // Hide sweeper thread from external view. 2066 bool is_hidden_from_external_view() const { return true; } 2067 2068 bool is_Code_cache_sweeper_thread() const { return true; } 2069 2070 // Prevent GC from unloading _scanned_compiled_method 2071 void oops_do(OopClosure* f, CodeBlobClosure* cf); 2072 void nmethods_do(CodeBlobClosure* cf); 2073 }; 2074 2075 // A thread used for Compilation. 2076 class CompilerThread : public JavaThread { 2077 friend class VMStructs; 2078 private: 2079 CompilerCounters* _counters; 2080 2081 ciEnv* _env; 2082 CompileLog* _log; 2083 CompileTask* volatile _task; // print_threads_compiling can read this concurrently. 2084 CompileQueue* _queue; 2085 BufferBlob* _buffer_blob; 2086 2087 AbstractCompiler* _compiler; 2088 TimeStamp _idle_time; 2089 2090 public: 2091 2092 static CompilerThread* current(); 2093 2094 CompilerThread(CompileQueue* queue, CompilerCounters* counters); 2095 ~CompilerThread(); 2096 2097 bool is_Compiler_thread() const { return true; } 2098 2099 virtual bool can_call_java() const; 2100 2101 // Hide native compiler threads from external view. 2102 bool is_hidden_from_external_view() const { return !can_call_java(); } 2103 2104 void set_compiler(AbstractCompiler* c) { _compiler = c; } 2105 AbstractCompiler* compiler() const { return _compiler; } 2106 2107 CompileQueue* queue() const { return _queue; } 2108 CompilerCounters* counters() const { return _counters; } 2109 2110 // Get/set the thread's compilation environment. 2111 ciEnv* env() { return _env; } 2112 void set_env(ciEnv* env) { _env = env; } 2113 2114 BufferBlob* get_buffer_blob() const { return _buffer_blob; } 2115 void set_buffer_blob(BufferBlob* b) { _buffer_blob = b; } 2116 2117 // Get/set the thread's logging information 2118 CompileLog* log() { return _log; } 2119 void init_log(CompileLog* log) { 2120 // Set once, for good. 2121 assert(_log == NULL, "set only once"); 2122 _log = log; 2123 } 2124 2125 void start_idle_timer() { _idle_time.update(); } 2126 jlong idle_time_millis() { 2127 return TimeHelper::counter_to_millis(_idle_time.ticks_since_update()); 2128 } 2129 2130 #ifndef PRODUCT 2131 private: 2132 IdealGraphPrinter *_ideal_graph_printer; 2133 public: 2134 IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; } 2135 void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; } 2136 #endif 2137 2138 // Get/set the thread's current task 2139 CompileTask* task() { return _task; } 2140 void set_task(CompileTask* task) { _task = task; } 2141 }; 2142 2143 inline CompilerThread* CompilerThread::current() { 2144 return JavaThread::current()->as_CompilerThread(); 2145 } 2146 2147 // The active thread queue. It also keeps track of the current used 2148 // thread priorities. 2149 class Threads: AllStatic { 2150 friend class VMStructs; 2151 private: 2152 static JavaThread* _thread_list; 2153 static int _number_of_threads; 2154 static int _number_of_non_daemon_threads; 2155 static int _return_code; 2156 static int _thread_claim_parity; 2157 #ifdef ASSERT 2158 static bool _vm_complete; 2159 #endif 2160 2161 static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS); 2162 static void initialize_jsr292_core_classes(TRAPS); 2163 2164 public: 2165 // Thread management 2166 // force_daemon is a concession to JNI, where we may need to add a 2167 // thread to the thread list before allocating its thread object 2168 static void add(JavaThread* p, bool force_daemon = false); 2169 static void remove(JavaThread* p); 2170 static void non_java_threads_do(ThreadClosure* tc); 2171 static void java_threads_do(ThreadClosure* tc); 2172 static void java_threads_and_vm_thread_do(ThreadClosure* tc); 2173 static void threads_do(ThreadClosure* tc); 2174 static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc); 2175 2176 // Initializes the vm and creates the vm thread 2177 static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain); 2178 static void convert_vm_init_libraries_to_agents(); 2179 static void create_vm_init_libraries(); 2180 static void create_vm_init_agents(); 2181 static void shutdown_vm_agents(); 2182 static bool destroy_vm(); 2183 // Supported VM versions via JNI 2184 // Includes JNI_VERSION_1_1 2185 static jboolean is_supported_jni_version_including_1_1(jint version); 2186 // Does not include JNI_VERSION_1_1 2187 static jboolean is_supported_jni_version(jint version); 2188 2189 // The "thread claim parity" provides a way for threads to be claimed 2190 // by parallel worker tasks. 2191 // 2192 // Each thread contains a a "parity" field. A task will claim the 2193 // thread only if its parity field is the same as the global parity, 2194 // which is updated by calling change_thread_claim_parity(). 2195 // 2196 // For this to work change_thread_claim_parity() needs to be called 2197 // exactly once in sequential code before starting parallel tasks 2198 // that should claim threads. 2199 // 2200 // New threads get their parity set to 0 and change_thread_claim_parity() 2201 // never set the global parity to 0. 2202 static int thread_claim_parity() { return _thread_claim_parity; } 2203 static void change_thread_claim_parity(); 2204 static void assert_all_threads_claimed() NOT_DEBUG_RETURN; 2205 2206 // Apply "f->do_oop" to all root oops in all threads. 2207 // This version may only be called by sequential code. 2208 static void oops_do(OopClosure* f, CodeBlobClosure* cf); 2209 // This version may be called by sequential or parallel code. 2210 static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf); 2211 2212 // Apply "f->do_oop" to roots in all threads that 2213 // are part of compiled frames 2214 static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf); 2215 2216 static void convert_hcode_pointers(); 2217 static void restore_hcode_pointers(); 2218 2219 // Sweeper 2220 static void nmethods_do(CodeBlobClosure* cf); 2221 2222 // RedefineClasses support 2223 static void metadata_do(void f(Metadata*)); 2224 static void metadata_handles_do(void f(Metadata*)); 2225 2226 #ifdef ASSERT 2227 static bool is_vm_complete() { return _vm_complete; } 2228 #endif // ASSERT 2229 2230 // Verification 2231 static void verify(); 2232 static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks, bool print_extended_info); 2233 static void print(bool print_stacks, bool internal_format) { 2234 // this function is only used by debug.cpp 2235 print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */, false /* simple format */); 2236 } 2237 static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen); 2238 static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf, 2239 int buflen, bool* found_current); 2240 static void print_threads_compiling(outputStream* st, char* buf, int buflen); 2241 2242 // Get Java threads that are waiting to enter a monitor. 2243 static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list, 2244 int count, address monitor); 2245 2246 // Get owning Java thread from the monitor's owner field. 2247 static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list, 2248 address owner); 2249 2250 // Number of threads on the active threads list 2251 static int number_of_threads() { return _number_of_threads; } 2252 // Number of non-daemon threads on the active threads list 2253 static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; } 2254 2255 // Deoptimizes all frames tied to marked nmethods 2256 static void deoptimized_wrt_marked_nmethods(); 2257 }; 2258 2259 2260 // Thread iterator 2261 class ThreadClosure: public StackObj { 2262 public: 2263 virtual void do_thread(Thread* thread) = 0; 2264 }; 2265 2266 class SignalHandlerMark: public StackObj { 2267 private: 2268 Thread* _thread; 2269 public: 2270 SignalHandlerMark(Thread* t) { 2271 _thread = t; 2272 if (_thread) _thread->enter_signal_handler(); 2273 } 2274 ~SignalHandlerMark() { 2275 if (_thread) _thread->leave_signal_handler(); 2276 _thread = NULL; 2277 } 2278 }; 2279 2280 2281 #endif // SHARE_VM_RUNTIME_THREAD_HPP