/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_THREAD_HPP
#define SHARE_VM_RUNTIME_THREAD_HPP

#include "jni.h"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/frame.hpp"
#include "runtime/handshake.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/park.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/thread_ext.hpp"
#include "runtime/unhandledOops.hpp"
#include "trace/traceBackend.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/align.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef ZERO
# include "stack_zero.hpp"
#endif

class ThreadSafepointState;
class ThreadsList;
class ThreadsSMRSupport;
class NestedThreadsList;

class JvmtiThreadState;
class JvmtiGetLoadedClassesClosure;
class ThreadStatistics;
class ConcurrentLocksDump;
class ParkEvent;
class Parker;

class ciEnv;
class CompileThread;
class CompileLog;
class CompileTask;
class CompileQueue;
class CompilerCounters;
class vframeArray;

class DeoptResourceMark;
class jvmtiDeferredLocalVariableSet;

class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;

class Metadata;
template <class T, MEMFLAGS F> class ChunkedList;
typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

#if INCLUDE_ALL_GCS
class PLAB;
#endif

// Class hierarchy
// - Thread
//   - NamedThread
//     - VMThread
//     - ConcurrentGCThread
//     - WorkerThread
//       - GangWorker
//       - GCTaskThread
//   - JavaThread
//     - various subclasses eg CompilerThread, ServiceThread
//   - WatcherThread

class Thread: public ThreadShadow {
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL_DECL Thread* _thr_current;
#endif

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop         _pending_exception;   // pending exception for current thread
  // const char* _exception_file;     // file information for exception (debugging only)
  // int         _exception_line;     // line information for exception (debugging only)
 protected:
  // Support for forcing alignment of thread objects for biased locking
  void*       _real_malloc_address;

  // JavaThread lifecycle support:
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;       // for get_nested_threads_hazard_ptr(), get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;    // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsListSetter;                           // for get_threads_hazard_ptr() access
  friend class ThreadsSMRSupport;                           // for get_threads_hazard_ptr() access

  ThreadsList* volatile _threads_hazard_ptr;
  ThreadsList* cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList* get_threads_hazard_ptr();
  void set_threads_hazard_ptr(ThreadsList* new_list);
  static bool is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList* tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList* untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
  NestedThreadsList* _nested_threads_hazard_ptr;
  NestedThreadsList* get_nested_threads_hazard_ptr() {
    return _nested_threads_hazard_ptr;
  }
  void set_nested_threads_hazard_ptr(NestedThreadsList* value) {
    assert(Threads_lock->owned_by_self(),
           "must own Threads_lock for _nested_threads_hazard_ptr to be valid.");
    _nested_threads_hazard_ptr = value;
  }
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }
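
  // Illustrative round-trip of the low-bit tagging scheme above (a sketch
  // for the reader, not code taken from any particular caller):
  //
  //   ThreadsList* tagged = tag_hazard_ptr(list);     // sets the low bit
  //   assert(is_hazard_ptr_tagged(tagged), "low bit must be set");
  //   ThreadsList* plain  = untag_hazard_ptr(tagged); // clears the low bit
  //   assert(plain == list, "round-trip preserves the pointer");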

 public:
  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false);
  }
  void operator delete(void* p);

 protected:
  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 private:

  // ***************************************************************
  // Suspend and resume support
  // ***************************************************************
  //
  // VM suspend/resume no longer exists - it was once used for various
  // things including safepoints but was deprecated and finally removed
  // in Java 7. Because VM suspension was considered "internal", Java-level
  // suspension was considered "external", and this legacy naming scheme
  // remains.
  //
  // External suspend/resume requests come from JVM_SuspendThread,
  // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
  // ResumeThread. External
  // suspend requests cause _external_suspend to be set and external
  // resume requests cause _external_suspend to be cleared.
  // External suspend requests do not nest on top of other external
  // suspend requests. The higher level APIs reject suspend requests
  // for already suspended threads.
  //
  // The external_suspend
  // flag is checked by has_special_runtime_exit_condition() and the java thread
  // will self-suspend when handle_special_runtime_exit_condition() is
  // called. Most uses of the _thread_blocked state in JavaThreads are
  // considered the same as being externally suspended; if the blocking
  // condition lifts, the JavaThread will self-suspend. Other places
  // where the VM checks for external_suspend include:
  //   + mutex granting (do not enter monitors when thread is suspended)
  //   + state transitions from _thread_in_native
  //
  // In general, java_suspend() does not wait for an external suspend
  // request to complete. When it returns, the only guarantee is that
  // the _external_suspend field is true.
  //
  // wait_for_ext_suspend_completion() is used to wait for an external
  // suspend request to complete. External suspend requests are usually
  // followed by some other interface call that requires the thread to
  // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
  // the interface that requires quiescence, we give the JavaThread a
  // chance to self-suspend before we need it to be quiescent. This
  // improves overall suspend/query performance.
  //
  // _suspend_flags controls the behavior of java_ suspend/resume.
  // It must be set under the protection of SR_lock. Reading the flag is
  // OK without SR_lock as long as the value is only used as a hint.
  // (e.g., check _external_suspend first without lock and then recheck
  // inside SR_lock and finish the suspension)
  //
  // _suspend_flags is also overloaded for other "special conditions" so
  // that a single check indicates whether any special action is needed
  // eg. for async exceptions.
  // -------------------------------------------------------------------
  // Notes:
  // 1. The suspend/resume logic no longer uses ThreadState in OSThread
  // but we still update its value to keep other parts of the system (mainly
  // JVMTI) happy. ThreadState is legacy code (see notes in
  // osThread.hpp).
  //
  // 2. It would be more natural if set_external_suspend() is private and
  // part of java_suspend(), but that probably would affect the suspend/query
  // performance. Needs more investigation on this.
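  //
  // An illustrative (hypothetical) use of the protocol described above,
  // from the requester's side:
  //
  //   jt->java_suspend();       // sets _external_suspend; may return before
  //                             // the target thread is actually quiescent
  //   uint32_t debug_bits = 0;
  //   if (jt->wait_for_ext_suspend_completion(SuspendRetryCount,
  //                                           SuspendRetryDelay, &debug_bits)) {
  //     // target has self-suspended and is quiescent
  //   }
  //
  // and from the target's side, at a runtime exit point:
  //
  //   if (thread->has_special_runtime_exit_condition()) {
  //     thread->handle_special_runtime_exit_condition();  // may self-suspend
  //   }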

  // suspend/resume lock: used for self-suspend
  Monitor* _SR_lock;

 protected:
  enum SuspendFlags {
    // NOTE: avoid using the sign-bit as cc generates different test code
    // when the sign-bit is used, and sometimes incorrectly - see CR 6398077

    _external_suspend       = 0x20000000U, // thread is asked to self suspend
    _ext_suspended          = 0x40000000U, // thread has self-suspended
    _deopt_suspend          = 0x10000000U, // thread needs to self suspend for deopt

    _has_async_exception    = 0x00000001U, // there is a pending async exception
    _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock

    _trace_flag             = 0x00000004U  // call tracing backend
  };

  // various suspension related flags - atomically updated
  // overloaded for async exception checking in check_special_condition_for_native_trans.
  volatile uint32_t _suspend_flags;

 private:
  int _num_nested_signal;

  DEBUG_ONLY(bool _suspendible_thread;)

 public:
  void enter_signal_handler() { _num_nested_signal++; }
  void leave_signal_handler() { _num_nested_signal--; }
  bool is_inside_signal_handler() const { return _num_nested_signal > 0; }

#ifdef ASSERT
  void set_suspendible_thread() {
    _suspendible_thread = true;
  }

  void clear_suspendible_thread() {
    _suspendible_thread = false;
  }

  bool is_suspendible_thread() { return _suspendible_thread; }
#endif

 private:
  // Active_handles points to a block of handles
  JNIHandleBlock* _active_handles;

  // One-element thread local free list
  JNIHandleBlock* _free_handle_block;

  // Point to the last handle mark
  HandleMark* _last_handle_mark;

  // The parity of the last strong_roots iteration in which this thread was
  // claimed as a task.
  int _oops_do_parity;

 public:
  void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const { return _last_handle_mark; }
 private:

  // Debug support for checking if code does allow safepoints or not.
  // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
  // a mutex, or blocking on an object synchronizer (Java locking).
  // If !allow_safepoint(), then an assertion failure will happen in any of the above cases.
  // If !allow_allocation(), then an assertion failure will happen during allocation
  // (Hence, !allow_safepoint() => !allow_allocation()).
  //
  // The two classes NoSafepointVerifier and NoAllocVerifier are used to set these counters.
  //
  NOT_PRODUCT(int _allow_safepoint_count;)  // If 0, the thread allows a safepoint to happen
  debug_only(int _allow_allocation_count;)  // If 0, the thread is allowed to allocate oops.

  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)           // Should we elide gc-a-lot?

  friend class NoAllocVerifier;
  friend class NoSafepointVerifier;
  friend class PauseNoSafepointVerifier;
  friend class GCLocker;

  volatile void* _polling_page;             // Thread local polling page

  ThreadLocalAllocBuffer _tlab;             // Thread-local eden
#if INCLUDE_ALL_GCS
  PLAB* _gclab;                             // Thread-local allocation buffer for GC (e.g. evacuation)
#endif
  jlong _allocated_bytes;                   // Cumulative number of bytes allocated on
                                            // the Java heap
  mutable TRACE_DATA _trace_data;           // Thread-local data for tracing

  ThreadExt _ext;

  int _vm_operation_started_count;          // VM_Operation support
  int _vm_operation_completed_count;        // VM_Operation support

  char _oom_during_evac;

  ObjectMonitor* _current_pending_monitor;  // ObjectMonitor this thread
                                            // is waiting to lock
  bool _current_pending_monitor_is_from_java; // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount;                          // length of omFreeList
  int omFreeProvision;                      // reload chunk size
  ObjectMonitor* omInUseList;               // SLL to track monitors in circulation
  int omInUseCount;                         // length of omInUseList

#ifdef ASSERT
 private:
  bool _visited_for_critical_count;

 public:
  void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
  bool was_visited_for_critical_count() const { return _visited_for_critical_count; }
#endif

 public:
  enum {
    is_definitely_current_thread = true
  };

  // Constructor
  Thread();
  virtual ~Thread();

  // Manage Thread::current()
  void initialize_thread_current();
  void clear_thread_current();              // TLS cleanup needed before threads terminate

 public:
  // thread entry point
  virtual void run();

  // Testers
  virtual bool is_VM_thread() const { return false; }
  virtual bool is_Java_thread() const { return false; }
  virtual bool is_Compiler_thread() const { return false; }
  virtual bool is_Code_cache_sweeper_thread() const { return false; }
  virtual bool is_hidden_from_external_view() const { return false; }
  virtual bool is_jvmti_agent_thread() const { return false; }
  // True iff the thread can perform GC operations at a safepoint.
  // Generally will be true only of VM thread and parallel GC WorkGang
  // threads.
  virtual bool is_GC_task_thread() const { return false; }
  virtual bool is_Watcher_thread() const { return false; }
  virtual bool is_ConcurrentGC_thread() const { return false; }
  virtual bool is_Named_thread() const { return false; }
  virtual bool is_Worker_thread() const { return false; }

  // Can this thread make Java upcalls?
  virtual bool can_call_java() const { return false; }

  // Casts
  virtual WorkerThread* as_Worker_thread() const { return NULL; }

  virtual char* name() const { return (char*)"Unknown thread"; }

  // Returns the current thread (ASSERTS if NULL)
  static inline Thread* current();
  // Returns the current thread, or NULL if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or NULL if not attached, and is
  // safe for use from signal-handlers
  static inline Thread* current_or_null_safe();

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static ThreadPriority get_priority(const Thread* const thread);
  static void start(Thread* thread);
  static void interrupt(Thread* thr);
  static bool is_interrupted(Thread* thr, bool clear_interrupted);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  ObjectMonitor** omInUseList_addr() { return (ObjectMonitor **)&omInUseList; }
  Monitor* SR_lock() const { return _SR_lock; }

  bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }

  inline void set_suspend_flag(SuspendFlags f);
  inline void clear_suspend_flag(SuspendFlags f);

  inline void set_has_async_exception();
  inline void clear_has_async_exception();

  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  inline void set_critical_native_unlock();
  inline void clear_critical_native_unlock();

  inline void set_trace_flag();
  inline void clear_trace_flag();

  bool is_oom_during_evac() const;
  void set_oom_during_evac(bool oom);

#ifdef ASSERT
  bool is_evac_allowed() const;
  void set_evac_allowed(bool evac_allowed);
#endif

  // Support for Unhandled Oop detection
  // Add the field for both fastdebug and debug builds to keep
  // Thread's fields layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot() { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v; }
#endif

  // Installs a pending exception to be inserted later
  static void send_async_exception(oop thread_oop, oop java_throwable);

  // Resource area
  ResourceArea* resource_area() const { return _resource_area; }
  void set_resource_area(ResourceArea* area) { _resource_area = area; }

  OSThread* osthread() const { return _osthread; }
  void set_osthread(OSThread* thread) { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const { return _handle_area; }
  void set_handle_area(HandleArea* area) { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles) { _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab() { return _tlab; }
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize();
    }
  }

  // Thread-Local GC Allocation Buffer (GCLAB) support
#if INCLUDE_ALL_GCS
  PLAB* gclab() { return _gclab; }
  void set_gclab(PLAB* gclab) { _gclab = gclab; }
#endif
  jlong allocated_bytes() { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET;
  TRACE_DATA* trace_data() const { return &_trace_data; }
  bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }

  const ThreadExt& ext() const { return _ext; }
  ThreadExt& ext() { return _ext; }

  // VM operation support
  int vm_operation_ticket() { return ++_vm_operation_started_count; }
  int vm_operation_completed_count() { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  bool current_pending_monitor_is_from_java() {
    return _current_pending_monitor_is_from_java;
  }

  // For tracking the ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* current_waiting_monitor() {
    return _current_waiting_monitor;
  }
  void set_current_waiting_monitor(ObjectMonitor* monitor) {
    _current_waiting_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Handles the parallel case for the method below.
 private:
  bool claim_oops_do_par_case(int collection_parity);
 public:
  // Requires that "collection_parity" is that of the current roots
  // iteration. If "is_par" is false, sets the parity of "this" to
  // "collection_parity", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's parity to
  // "collection_parity", if it is not already. Returns "true" iff the
  // calling thread does the update; this indicates that the calling thread
  // has claimed the thread's stack as a root group in the current
  // collection.
  bool claim_oops_do(bool is_par, int collection_parity) {
    if (!is_par) {
      _oops_do_parity = collection_parity;
      return true;
    } else {
      return claim_oops_do_par_case(collection_parity);
    }
  }

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

  // Used by fast lock support
  virtual bool is_lock_owned(address adr) const;

  // Check if address is in the stack of the thread (not just for locks).
  // Warning: this method can only be used on the running thread.
  bool is_in_stack(address adr) const;
  // Check if address is in the usable part of the stack (excludes protected
  // guard pages)
  bool is_in_usable_stack(address adr) const;

  // Sets this thread as the starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads, etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;                      // Platform-specific thread information

  // Thread local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address _stack_base;
  size_t  _stack_size;
  uintptr_t _self_raw_id;                   // used by get_thread (mutable)
  int     _lgrp_id;

  volatile void** polling_page_addr() { return &_polling_page; }

 public:
  // Stack overflow support
  address stack_base() const { assert(_stack_base != NULL, "Sanity check"); return _stack_base; }
  void set_stack_base(address base) { _stack_base = base; }
  size_t stack_size() const { return _stack_size; }
  void set_stack_size(size_t size) { _stack_size = size; }
  address stack_end() const { return stack_base() - stack_size(); }
  void record_stack_base_and_size();

  bool on_local_stack(address adr) const {
    // QQQ this has knowledge of direction, ought to be a stack method
    return (_stack_base >= adr && adr >= stack_end());
  }

  uintptr_t self_raw_id() { return _self_raw_id; }
  void set_self_raw_id(uintptr_t value) { _self_raw_id = value; }

  int lgrp_id() const { return _lgrp_id; }
  void set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  virtual void print_on(outputStream* st) const;
  virtual void print_nested_threads_hazard_ptrs_on(outputStream* st) const;
  void print() const { print_on(tty); }
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by thread.
  Monitor* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const { print_owned_locks_on(tty); }
  Monitor* owned_locks() const { return _owned_locks; }
  bool owns_locks() const { return owned_locks() != NULL; }
  bool owns_locks_but_compiled_lock() const;
  int oops_do_parity() const { return _oops_do_parity; }

  // Deadlock detection
  bool allow_allocation() { return _allow_allocation_count == 0; }
  ResourceMark* current_resource_mark() { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif

  void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
  static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }

  static ByteSize polling_page_offset() { return byte_offset_of(Thread, _polling_page); }

#define TLAB_FIELD_OFFSET(name) \
  static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }

  TLAB_FIELD_OFFSET(start)
  TLAB_FIELD_OFFSET(end)
  TLAB_FIELD_OFFSET(top)
  TLAB_FIELD_OFFSET(pf_top)
  TLAB_FIELD_OFFSET(size)                   // desired_size
  TLAB_FIELD_OFFSET(refill_waste_limit)
  TLAB_FIELD_OFFSET(number_of_refills)
  TLAB_FIELD_OFFSET(fast_refill_waste)
  TLAB_FIELD_OFFSET(slow_allocations)

#undef TLAB_FIELD_OFFSET

  static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }

 public:
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent * _ParkEvent;                   // for synchronized()
  ParkEvent * _SleepEvent;                  // for Thread.sleep
  ParkEvent * _MutexEvent;                  // for native internal Mutex/Monitor
  ParkEvent * _MuxEvent;                    // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion;                  // diagnostic

  volatile int _OnTrap;                     // Resume-at IP delta
  jint _hashStateW;                         // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;                         // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
  void * _schedctl;


  volatile jint rng[4];                     // RNG for spin loop

  // Low-level leaf-lock primitives used to implement synchronization
  // and native monitor-mutex infrastructure.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);
  static void muxAcquire(volatile intptr_t * Lock, const char * Name);
  static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev);
  static void muxRelease(volatile intptr_t * Lock);
};

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != NULL, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
}

// Name support for threads. Non-JavaThread subclasses with multiple
// uniquely named instances should derive from this.
class NamedThread: public Thread {
  friend class VMStructs;
  enum {
    max_name_len = 64
  };
 private:
  char* _name;
  // log JavaThread being processed by oops_do
  JavaThread* _processed_thread;
  uint _gc_id;                              // The current GC id when a thread takes part in GC

 public:
  NamedThread();
  ~NamedThread();
  // May only be called once per thread.
  void set_name(const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
  void initialize_named_thread();
  virtual bool is_Named_thread() const { return true; }
  virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
  JavaThread *processed_thread() { return _processed_thread; }
  void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
  virtual void print_on(outputStream* st) const;

  void set_gc_id(uint gc_id) { _gc_id = gc_id; }
  uint gc_id() { return _gc_id; }
};
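
// An illustrative use of set_name() (hypothetical subclass and format
// arguments; set_name() is printf-like per ATTRIBUTE_PRINTF(2, 3) and may
// only be called once per thread):
//
//   MyServiceThread* t = new MyServiceThread();  // derives from NamedThread
//   t->set_name("My Service Thread#%u", 1u);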
// Worker threads are named and carry the id of their assigned work.
class WorkerThread: public NamedThread {
 private:
  uint _id;
 public:
  WorkerThread() : _id(0) { }
  virtual bool is_Worker_thread() const { return true; }

  virtual WorkerThread* as_Worker_thread() const {
    assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
    return (WorkerThread*) this;
  }

  void set_id(uint work_id) { _id = work_id; }
  uint id() const { return _id; }
};

// A single WatcherThread is used for simulating timer interrupts.
class WatcherThread: public Thread {
  friend class VMStructs;
 public:
  virtual void run();

 private:
  static WatcherThread* _watcher_thread;

  static bool _startable;
  // volatile due to at least one lock-free read
  volatile static bool _should_terminate;
 public:
  enum SomeConstants {
    delay_interval = 10                     // interrupt delay in milliseconds
  };

  // Constructor
  WatcherThread();

  // No destruction allowed
  ~WatcherThread() {
    guarantee(false, "WatcherThread deletion must fix the race with VM termination");
  }

  // Tester
  bool is_Watcher_thread() const { return true; }

  // Printing
  char* name() const { return (char*)"VM Periodic Task Thread"; }
  void print_on(outputStream* st) const;
  void unpark();

  // Returns the single instance of WatcherThread
  static WatcherThread* watcher_thread() { return _watcher_thread; }

  // Create and start the single instance of WatcherThread, or stop it on shutdown
  static void start();
  static void stop();
  // Only allow start once the VM is sufficiently initialized.
  // Otherwise the first task to enroll will trigger the start.
  static void make_startable();
 private:
  int sleep() const;
};


class CompilerThread;

typedef void (*ThreadFunction)(JavaThread*, TRAPS);

class JavaThread: public Thread {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class WhiteBox;
 private:
  JavaThread* _next;                        // The next thread in the Threads list
  bool _on_thread_list;                     // Is set when this JavaThread is added to the Threads list
  oop _threadObj;                           // The Java level thread object

#ifdef ASSERT
 private:
  int _java_call_counter;

 public:
  int java_call_counter() { return _java_call_counter; }
  void inc_java_call_counter() { _java_call_counter++; }
  void dec_java_call_counter() {
    assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
    _java_call_counter--;
  }
 private:  // restore original namespace restriction
#endif  // ifdef ASSERT

#ifndef PRODUCT
 public:
  enum {
    jump_ring_buffer_size = 16
  };
 private:  // restore original namespace restriction
#endif

  JavaFrameAnchor _anchor;                  // Encapsulation of current java frame and its state

  ThreadFunction _entry_point;

  JNIEnv        _jni_environment;

  // Deopt support
  DeoptResourceMark* _deopt_mark;           // Holds special ResourceMark for deoptimization

  intptr_t* _must_deopt_id;                 // id of frame that needs to be deopted once we
                                            // transition out of native
  CompiledMethod* _deopt_nmethod;           // CompiledMethod that is currently being deoptimized
  vframeArray* _vframe_array_head;          // Holds the heap of the active vframeArrays
  vframeArray* _vframe_array_last;          // Holds last vFrameArray we popped
  // Because deoptimization is lazy we must save jvmti requests to set locals
  // in compiled frames until we deoptimize and we have an interpreter frame.
  // This holds the pointer to an array (yeah, like there might be more than one) of
  // descriptions of compiled vframes that have locals that need to be updated.
  GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;

  // Handshake value for fixing 6243940. We need a place for the i2c
  // adapter to store the callee Method*. This value is NEVER live
  // across a gc point so it does NOT have to be gc'd.
  // The handshake is open ended since we can't be certain that it will
  // be NULLed. This is because we rarely ever see the race and end up
  // in handle_wrong_method which is the backend of the handshake. See
  // code in i2c adapters and handle_wrong_method.

  Method*       _callee_target;

  // Used to pass back results to the interpreter or generated code running Java code.
  oop           _vm_result;                 // oop result is GC-preserved
  Metadata*     _vm_result_2;               // non-oop result

  // See ReduceInitialCardMarks: this holds the precise space interval of
  // the most recent slow path allocation for which compiled code has
  // elided card-marks for performance along the fast-path.
  MemRegion     _deferred_card_mark;

  MonitorChunk* _monitor_chunks;            // Contains the off stack monitors
                                            // allocated during deoptimization
                                            // and by JNI_MonitorEnter/Exit

  // Async. requests support
  enum AsyncRequests {
    _no_async_condition = 0,
    _async_exception,
    _async_unsafe_access_error
  };
  AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
  oop           _pending_async_exception;

  // Safepoint support
 public:                                    // Expose _thread_state for SafeFetchInt()
  volatile JavaThreadState _thread_state;
 private:
  ThreadSafepointState *_safepoint_state;   // Holds information about a thread during a safepoint
  address               _saved_exception_pc; // Saved pc of instruction where last implicit exception happened

  // JavaThread termination support
  enum TerminatedTypes {
    _not_terminated = 0xDEAD - 2,
    _thread_exiting,                        // JavaThread::exit() has been called for this thread
    _thread_terminated,                     // JavaThread is removed from thread list
    _vm_exited                              // JavaThread is still executing native code, but VM is terminated
                                            // only VM_Exit can set _vm_exited
  };

  // In general a JavaThread's _terminated field transitions as follows:
  //
  //   _not_terminated => _thread_exiting => _thread_terminated
  //
  // _vm_exited is a special value to cover the case of a JavaThread
  // executing native code after the VM itself is terminated.
  volatile TerminatedTypes _terminated;
  // suspend/resume support
  volatile bool         _suspend_equivalent; // Suspend equivalent condition
  jint                  _in_deopt_handler;  // count of deoptimization
                                            // handlers thread is in
  volatile bool         _doing_unsafe_access; // Thread may fault due to unsafe access
  bool                  _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
                                            // never locked) when throwing an exception. Used by interpreter only.

  // JNI attach states:
  enum JNIAttachStates {
    _not_attaching_via_jni = 1,             // thread is not attaching via JNI
    _attaching_via_jni,                     // thread is attaching via JNI
    _attached_via_jni                       // thread has attached via JNI
  };

  // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
  // A native thread that is attaching via JNI starts with a value
  // of _attaching_via_jni and transitions to _attached_via_jni.
  volatile JNIAttachStates _jni_attach_state;

 public:
  // State of the stack guard pages for this thread.
  enum StackGuardState {
    stack_guard_unused,                     // not needed
    stack_guard_reserved_disabled,
    stack_guard_yellow_reserved_disabled,   // disabled (temporarily) after stack overflow
    stack_guard_enabled                     // enabled
  };

 private:

#if INCLUDE_JVMCI
  // The _pending_* fields below are used to communicate extra information
  // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.

  // Communicates the DeoptReason and DeoptAction of the uncommon trap
  int       _pending_deoptimization;

  // Specifies whether the uncommon trap is to bci 0 of a synchronized method
  // before the monitor has been acquired.
  bool      _pending_monitorenter;

  // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
  bool      _pending_transfer_to_interpreter;

  // Guard for re-entrant call to JVMCIRuntime::adjust_comp_level
  bool      _adjusting_comp_level;

  // An object that JVMCI compiled code can use to further describe and
  // uniquely identify the speculative optimization guarded by the uncommon trap
  oop       _pending_failed_speculation;

  // These fields are mutually exclusive in terms of live ranges.
  union {
    // Communicates the pc at which the most recent implicit exception occurred
    // from the signal handler to a deoptimization stub.
    address   _implicit_exception_pc;

    // Communicates an alternative call target to an i2c stub from a JavaCall.
    address   _alternate_call_target;
  } _jvmci;

  // Support for high precision, thread sensitive counters in JVMCI compiled code.
  jlong*    _jvmci_counters;

 public:
  static jlong* _jvmci_old_thread_counters;
  static void collect_counters(typeArrayOop array);
 private:
#endif // INCLUDE_JVMCI

  StackGuardState  _stack_guard_state;

  // Precompute the limit of the stack as used in stack overflow checks.
  // We load it from here to simplify the stack overflow check in assembly.
  address          _stack_overflow_limit;
  address          _reserved_stack_activation;

  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
  // code)
  volatile oop     _exception_oop;          // Exception thrown in compiled code
  volatile address _exception_pc;           // PC where exception happened
  volatile address _exception_handler_pc;   // PC for handler of exception
  volatile int     _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site.

 private:
  // support for JNI critical regions
  jint    _jni_active_critical;             // count of entries into JNI critical region

  // Checked JNI: function name requires exception check
  char* _pending_jni_exception_check_fn;

  // For deadlock detection.
  int _depth_first_number;

  // JVMTI PopFrame support
  // This is set to popframe_pending to signal that the top Java frame should be popped immediately
  int _popframe_condition;

  // If reallocation of scalar replaced objects fails, we throw OOM
  // and during exception propagation, pop the top
  // _frames_to_pop_failed_realloc frames, the ones that reference
  // failed reallocations.
  int _frames_to_pop_failed_realloc;

#ifndef PRODUCT
  int _jmp_ring_index;
  struct {
    // We use intptr_t instead of address so debugger doesn't try and display strings
    intptr_t _target;
    intptr_t _instruction;
    const char* _file;
    int _line;
  } _jmp_ring[jump_ring_buffer_size];
#endif // PRODUCT

#if INCLUDE_ALL_GCS
  // Support for G1 barriers

  SATBMarkQueue _satb_mark_queue;           // Thread-local log for SATB barrier.
  // Set of all such queues.
  static SATBMarkQueueSet _satb_mark_queue_set;

  DirtyCardQueue _dirty_card_queue;         // Thread-local log for dirty cards.
  // Set of all such queues.
  static DirtyCardQueueSet _dirty_card_queue_set;

  void flush_barrier_queues();

  // Support for Shenandoah barriers
  static char _gc_state_global;
  char _gc_state;

#endif // INCLUDE_ALL_GCS

  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize();                        // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif

  // JNI function table getter/setter for JVMTI jni function table interception API.
  void set_jni_functions(struct JNINativeInterface_* functionTable) {
    _jni_environment.functions = functionTable;
  }
  struct JNINativeInterface_* get_jni_functions() {
    return (struct JNINativeInterface_ *)_jni_environment.functions;
  }

  // This function is called at thread creation to allow
  // platform specific thread variables to be initialized.
  void cache_global_variables();

  // Executes Shutdown.shutdown()
  void invoke_shutdown_hooks();

  // Cleanup on thread exit
  enum ExitType {
    normal_exit,
    jni_detach
  };
  void exit(bool destroy_vm, ExitType exit_type = normal_exit);

  void cleanup_failed_attach_current_thread();

  // Testers
  virtual bool is_Java_thread() const { return true; }
  virtual bool can_call_java() const { return true; }

  // Thread chain operations
  JavaThread* next() const { return _next; }
  void set_next(JavaThread* p) { _next = p; }

  // Thread oop. threadObj() can be NULL for initial JavaThread
  // (or for threads attached via JNI)
  oop threadObj() const { return _threadObj; }
  void set_threadObj(oop p) { _threadObj = p; }

  ThreadPriority java_priority() const;     // Read from threadObj()

  // Prepare thread and add to priority queue. If a priority is
  // not specified, use the priority of the thread object. Threads_lock
  // must be held while this function is called.
  void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
  void prepare_ext();

  void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
  address saved_exception_pc() { return _saved_exception_pc; }


  ThreadFunction entry_point() const { return _entry_point; }

  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
  void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS);

  // Last frame anchor routines

  JavaFrameAnchor* frame_anchor(void) { return &_anchor; }

  // last_Java_sp
  bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); }
  intptr_t* last_Java_sp() const { return _anchor.last_Java_sp(); }

  // last_Java_pc

  address last_Java_pc(void) { return _anchor.last_Java_pc(); }

  // Safepoint support
#if !(defined(PPC64) || defined(AARCH64))
  JavaThreadState thread_state() const { return _thread_state; }
  void set_thread_state(JavaThreadState s) { _thread_state = s; }
#else
  // Use membars when accessing volatile _thread_state. See
  // Threads::create_vm() for size checks.
  inline JavaThreadState thread_state() const;
  inline void set_thread_state(JavaThreadState s);
#endif
  ThreadSafepointState *safepoint_state() const { return _safepoint_state; }
  void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
  bool is_at_poll_safepoint() { return _safepoint_state->is_at_poll_safepoint(); }

  // JavaThread termination and lifecycle support:
  void smr_delete();
  bool on_thread_list() const { return _on_thread_list; }
  void set_on_thread_list() { _on_thread_list = true; }

  // thread has called JavaThread::exit() or is terminated
  bool is_exiting() const;
  // thread is terminated (no longer on the threads list); we compare
  // against the two non-terminated values so that a freed JavaThread
  // will also be considered terminated.
  bool check_is_terminated(TerminatedTypes l_terminated) const {
    return l_terminated != _not_terminated && l_terminated != _thread_exiting;
  }
  bool is_terminated() const;
  void set_terminated(TerminatedTypes t);
  // special for Threads::remove() which is static:
  void set_terminated_value();
  void block_if_vm_exited();

  bool doing_unsafe_access() { return _doing_unsafe_access; }
  void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; }

  bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
  void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }

  inline void set_polling_page(void* poll_value);
  inline volatile void* get_polling_page();

 private:
  // Support for thread handshake operations
  HandshakeState _handshake;
 public:
  void set_handshake_operation(HandshakeOperation* op) {
    _handshake.set_operation(this, op);
  }

  bool has_handshake() const {
    return _handshake.has_operation();
  }

  void cancel_handshake() {
    _handshake.cancel(this);
  }

  void handshake_process_by_self() {
    _handshake.process_by_self(this);
  }

  void handshake_process_by_vmthread() {
    _handshake.process_by_vmthread(this);
  }

  // Suspend/resume support for JavaThread
 private:
  inline void set_ext_suspended();
  inline void clear_ext_suspended();

 public:
  void java_suspend();
  void java_resume();
  int  java_suspend_self();

  void check_and_wait_while_suspended() {
    assert(JavaThread::current() == this, "sanity check");

    bool do_self_suspend;
    do {
      // were we externally suspended while we were waiting?
      do_self_suspend = handle_special_suspend_equivalent_condition();
      if (do_self_suspend) {
        // don't surprise the thread that suspended us by returning
        java_suspend_self();
        set_suspend_equivalent();
      }
    } while (do_self_suspend);
  }
  static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
  // Check for async exception in addition to safepoint and suspend request.
  static void check_special_condition_for_native_trans(JavaThread *thread);

  // Same as check_special_condition_for_native_trans but finishes the
  // transition into thread_in_Java mode so that it can potentially
  // block.
  static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);

  bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
  bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    // Warning: is_ext_suspend_completed() may temporarily drop the
    // SR_lock to allow the thread to reach a stable thread state if
    // it is currently in a transient thread state.
    return is_ext_suspend_completed(false /* !called_by_wait */,
                                    SuspendRetryDelay, bits);
  }

  // We cannot allow wait_for_ext_suspend_completion() to run forever or
  // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
  // passed as the count and delay parameters. Experiments with specific
  // calls to wait_for_ext_suspend_completion() can be done by passing
  // other values in the code. Experiments with all calls can be done
  // via the appropriate -XX options.
  bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);

  // test for suspend - most (all?) of these should go away
  bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);

  inline void set_external_suspend();
  inline void clear_external_suspend();

  inline void set_deopt_suspend();
  inline void clear_deopt_suspend();
  bool is_deopt_suspend() { return (_suspend_flags & _deopt_suspend) != 0; }

  bool is_external_suspend() const {
    return (_suspend_flags & _external_suspend) != 0;
  }
  // Whenever a thread transitions from native to vm/java it must suspend
  // if external|deopt suspend is present.
  bool is_suspend_after_native() const {
    return (_suspend_flags & (_external_suspend | _deopt_suspend)) != 0;
  }

  // external suspend request is completed
  bool is_ext_suspended() const {
    return (_suspend_flags & _ext_suspended) != 0;
  }

  bool is_external_suspend_with_lock() const {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_external_suspend();
  }

  // Special method to handle a pending external suspend request
  // when a suspend equivalent condition lifts.
  bool handle_special_suspend_equivalent_condition() {
    assert(is_suspend_equivalent(),
           "should only be called in a suspend equivalence condition");
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    bool ret = is_external_suspend();
    if (!ret) {
      // not about to self-suspend so clear suspend equivalence
      clear_suspend_equivalent();
    }
    // implied else:
    // We have a pending external suspend request so we leave the
    // suspend_equivalent flag set until java_suspend_self() sets
    // the ext_suspended flag and clears the suspend_equivalent
    // flag. This ensures that wait_for_ext_suspend_completion()
    // will return consistent values.
    return ret;
  }

  // utility methods to see if we are doing some kind of suspension
  bool is_being_ext_suspended() const {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_ext_suspended() || is_external_suspend();
  }

  bool is_suspend_equivalent() const { return _suspend_equivalent; }

  void set_suspend_equivalent() { _suspend_equivalent = true; }
  void clear_suspend_equivalent() { _suspend_equivalent = false; }

  // Thread.stop support
  void send_thread_stop(oop throwable);
  AsyncRequests clear_special_runtime_exit_condition() {
    AsyncRequests x = _special_runtime_exit_condition;
    _special_runtime_exit_condition = _no_async_condition;
    return x;
  }

  // Are any async conditions present?
  bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }

  void check_and_handle_async_exceptions(bool check_unsafe_error = true);

  // these next two are also used for self-suspension and async exception support
  void handle_special_runtime_exit_condition(bool check_asyncs = true);

  // Return true if JavaThread has an asynchronous condition or
  // if external suspension is requested.
  bool has_special_runtime_exit_condition() {
    // Because we don't use is_external_suspend_with_lock
    // it is possible that we won't see an asynchronous external suspend
    // request that has just gotten started, i.e., SR_lock grabbed but
    // _external_suspend field change either not made yet or not visible
    // yet. However, this is okay because the request is asynchronous and
    // we will see the new flag value the next time through. It's also
    // possible that the external suspend request is dropped after
    // we have checked is_external_suspend(); we will recheck its value
    // under SR_lock in java_suspend_self().
    return (_special_runtime_exit_condition != _no_async_condition) ||
            is_external_suspend() || is_trace_suspend();
  }

  void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; }

  inline void set_pending_async_exception(oop e);

  // Fast-locking support
  bool is_lock_owned(address adr) const;

  // Accessors for vframe array top
  // The linked list of vframe arrays is sorted on sp. This means when we
  // unpack the head must contain the vframe array to unpack.
  void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
  vframeArray* vframe_array_head() const { return _vframe_array_head; }

  // Side structure for deferring update of java frame locals until deopt occurs
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
  void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }

  // These only really exist to make debugging deopt problems simpler

  void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
  vframeArray* vframe_array_last() const { return _vframe_array_last; }

  // The special resourceMark used during deoptimization

  void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; }
  DeoptResourceMark* deopt_mark(void) { return _deopt_mark; }

  intptr_t* must_deopt_id() { return _must_deopt_id; }
  void set_must_deopt_id(intptr_t* id) { _must_deopt_id = id; }
  void clear_must_deopt_id() { _must_deopt_id = NULL; }

  void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; }
  CompiledMethod* deopt_compiled_method() { return _deopt_nmethod; }

  Method* callee_target() const { return _callee_target; }
  void set_callee_target(Method* x) { _callee_target = x; }

  // Oop results of vm runtime calls
  oop vm_result() const { return _vm_result; }
  void set_vm_result(oop x) { _vm_result = x; }

  Metadata* vm_result_2() const { return _vm_result_2; }
  void set_vm_result_2(Metadata* x) { _vm_result_2 = x; }

  MemRegion deferred_card_mark() const { return _deferred_card_mark; }
  void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; }

#if INCLUDE_JVMCI
  int pending_deoptimization() const { return _pending_deoptimization; }
  oop pending_failed_speculation() const { return _pending_failed_speculation; }
  bool adjusting_comp_level() const { return _adjusting_comp_level; }
  void set_adjusting_comp_level(bool b) { _adjusting_comp_level = b; }
  bool has_pending_monitorenter() const { return _pending_monitorenter; }
  void set_pending_monitorenter(bool b) { _pending_monitorenter = b; }
#if INCLUDE_JVMCI
  int  pending_deoptimization() const            { return _pending_deoptimization; }
  oop  pending_failed_speculation() const        { return _pending_failed_speculation; }
  bool adjusting_comp_level() const              { return _adjusting_comp_level; }
  void set_adjusting_comp_level(bool b)          { _adjusting_comp_level = b; }
  bool has_pending_monitorenter() const          { return _pending_monitorenter; }
  void set_pending_monitorenter(bool b)          { _pending_monitorenter = b; }
  void set_pending_deoptimization(int reason)    { _pending_deoptimization = reason; }
  void set_pending_failed_speculation(oop failed_speculation) { _pending_failed_speculation = failed_speculation; }
  void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; }
  void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; }
  void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; }
#endif // INCLUDE_JVMCI

  // Exception handling for compiled methods
  oop      exception_oop() const                 { return _exception_oop; }
  address  exception_pc() const                  { return _exception_pc; }
  address  exception_handler_pc() const          { return _exception_handler_pc; }
  bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }

  void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
  void set_exception_pc(address a)               { _exception_pc = a; }
  void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }

  void clear_exception_oop_and_pc() {
    set_exception_oop(NULL);
    set_exception_pc(NULL);
  }
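  // Illustrative sketch (assumptions, not code from this file): when an
  // exception unwinds compiled code, the runtime typically parks the
  // exception and the throwing pc in the fields above before jumping to the
  // handler-lookup stub, roughly:
  //
  //   thread->set_exception_oop(ex);       // keep the oop GC-visible
  //   thread->set_exception_pc(ret_pc);    // pc at which it was raised
  //   // ... handler lookup then records its result:
  //   thread->set_exception_handler_pc(handler_pc);
  //   // generated code reloads these fields (via the *_offset() accessors
  //   // below) and resets them with clear_exception_oop_and_pc().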
  // Stack overflow support
  //
  //  (small addresses)
  //
  //  --  <-- stack_end()                   ---
  //  |                                      |
  //  |  red pages                           |
  //  |                                      |
  //  --  <-- stack_red_zone_base()          |
  //  |                                      |
  //  |                                     guard
  //  |  yellow pages                       zone
  //  |                                      |
  //  |                                      |
  //  --  <-- stack_yellow_zone_base()       |
  //  |                                      |
  //  |                                      |
  //  |  reserved pages                      |
  //  |                                      |
  //  --  <-- stack_reserved_zone_base()    ---      ---
  //                                                 /|\  shadow  <--  stack_overflow_limit() (somewhere in here)
  //                                                  |   zone
  //                                                 \|/  size
  //  some untouched memory                          ---
  //
  //
  //  --
  //  |
  //  |  shadow zone
  //  |
  //  --
  //  x    frame n
  //  --
  //  x    frame n-1
  //  x
  //  --
  //  ...
  //
  //  --
  //  x    frame 0
  //  --  <-- stack_base()
  //
  //  (large addresses)
  //

 private:
  // These values are derived from flags StackRedPages, StackYellowPages,
  // StackReservedPages and StackShadowPages. The zone size is determined
  // ergonomically if page_size > 4K.
  static size_t _stack_red_zone_size;
  static size_t _stack_yellow_zone_size;
  static size_t _stack_reserved_zone_size;
  static size_t _stack_shadow_zone_size;
 public:
  inline size_t stack_available(address cur_sp);

  static size_t stack_red_zone_size() {
    assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_red_zone_size;
  }
  static void set_stack_red_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We cannot protect if the red zone size is not page aligned.");
    assert(_stack_red_zone_size == 0, "This should be called only once.");
    _stack_red_zone_size = s;
  }
  address stack_red_zone_base() {
    return (address)(stack_end() + stack_red_zone_size());
  }
  bool in_stack_red_zone(address a) {
    return a <= stack_red_zone_base() && a >= stack_end();
  }

  static size_t stack_yellow_zone_size() {
    assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_yellow_zone_size;
  }
  static void set_stack_yellow_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We cannot protect if the yellow zone size is not page aligned.");
    assert(_stack_yellow_zone_size == 0, "This should be called only once.");
    _stack_yellow_zone_size = s;
  }

  static size_t stack_reserved_zone_size() {
    // _stack_reserved_zone_size may be 0. This indicates the feature is off.
    return _stack_reserved_zone_size;
  }
  static void set_stack_reserved_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We cannot protect if the reserved zone size is not page aligned.");
    assert(_stack_reserved_zone_size == 0, "This should be called only once.");
    _stack_reserved_zone_size = s;
  }
  address stack_reserved_zone_base() {
    return (address)(stack_end() +
                     (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
  }
  bool in_stack_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) &&
           (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
  }

  static size_t stack_yellow_reserved_zone_size() {
    return _stack_yellow_zone_size + _stack_reserved_zone_size;
  }
  bool in_stack_yellow_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
  }

  // Size of red + yellow + reserved zones.
  static size_t stack_guard_zone_size() {
    return stack_red_zone_size() + stack_yellow_reserved_zone_size();
  }

  static size_t stack_shadow_zone_size() {
    assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_shadow_zone_size;
  }
  static void set_stack_shadow_zone_size(size_t s) {
    // The shadow area is not allocated or protected, so
    // it need not be page aligned.
    // But the stack bang currently assumes that it is a
    // multiple of page size. This guarantees that the bang
    // loop touches all pages in the shadow zone.
    // This can be guaranteed differently, as well. E.g., if
    // the page size is a multiple of 4K, banging in 4K steps
    // suffices to touch all pages. (Some pages are banged
    // several times, though.)
    assert(is_aligned(s, os::vm_page_size()),
           "Stack bang assumes multiple of page size.");
    assert(_stack_shadow_zone_size == 0, "This should be called only once.");
    _stack_shadow_zone_size = s;
  }
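  // Worked example (illustrative only, not part of the original header):
  // assume a 4 KB page size and hypothetical zone sizes of one red page,
  // two yellow pages and one reserved page. Counting up from stack_end():
  //
  //   stack_red_zone_base()      == stack_end() + 0x1000
  //   stack_reserved_zone_base() == stack_end() + 0x1000 + 0x2000 + 0x1000
  //   stack_guard_zone_size()    == 0x4000   // red + yellow + reserved
  //
  //   in_stack_red_zone(a) holds exactly when
  //   stack_end() <= a && a <= stack_end() + 0x1000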
  void create_stack_guard_pages();
  void remove_stack_guard_pages();

  void enable_stack_reserved_zone();
  void disable_stack_reserved_zone();
  void enable_stack_yellow_reserved_zone();
  void disable_stack_yellow_reserved_zone();
  void enable_stack_red_zone();
  void disable_stack_red_zone();

  inline bool stack_guard_zone_unused();
  inline bool stack_yellow_reserved_zone_disabled();
  inline bool stack_reserved_zone_disabled();
  inline bool stack_guards_enabled();

  address reserved_stack_activation() const { return _reserved_stack_activation; }
  void set_reserved_stack_activation(address addr) {
    assert(_reserved_stack_activation == stack_base()
            || _reserved_stack_activation == NULL
            || addr == stack_base(), "Must not be set twice");
    _reserved_stack_activation = addr;
  }

  // Attempt to reguard the stack after a stack overflow may have occurred.
  // Returns true if (a) guard pages are not needed on this thread, (b) the
  // pages are already guarded, or (c) the pages were successfully reguarded.
  // Returns false if there is not enough stack space to reguard the pages, in
  // which case the caller should unwind a frame and try again. The argument
  // should be the caller's (approximate) sp.
  bool reguard_stack(address cur_sp);
  // Similar to the above, but checks whether the current stack pointer is out
  // of the guard area and reguards if possible.
  bool reguard_stack(void);

  address stack_overflow_limit() { return _stack_overflow_limit; }
  void set_stack_overflow_limit() {
    _stack_overflow_limit =
      stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
  }
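  // Illustrative sketch (assumptions, not code from this file): method-entry
  // code "bangs" the stack so overflow is detected eagerly. Conceptually the
  // generated prologue performs a check equivalent to:
  //
  //   JavaThread* thread = ...;   // current thread
  //   address new_sp = ...;       // sp after allocating the new frame
  //   if (new_sp < thread->stack_overflow_limit()) {
  //     // not enough headroom above the guard pages: take the slow path
  //     // and raise a StackOverflowError
  //   }
  //
  // _stack_overflow_limit sits MAX2(guard zone, shadow zone) bytes above
  // stack_end(), so a hit means the new frame could run into the guard pages.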
  // Misc. accessors/mutators
  void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
  void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
  bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }

#ifndef PRODUCT
  void record_jump(address target, address instr, const char* file, int line);
#endif // PRODUCT

  // For assembly stub generation
  static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
#ifndef PRODUCT
  static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
  static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
#endif // PRODUCT
  static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
  static ByteSize pending_jni_exception_check_fn_offset() {
    return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
  }
  static ByteSize last_Java_sp_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  }
  static ByteSize last_Java_pc_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
  }
  static ByteSize frame_anchor_offset() {
    return byte_offset_of(JavaThread, _anchor);
  }
  static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target); }
  static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result); }
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
#if INCLUDE_JVMCI
  static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
  static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
  static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
  static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
  static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
  static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
#endif // INCLUDE_JVMCI
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
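  // Illustrative sketch (not code from this file): these ByteSize offsets let
  // generated stubs address JavaThread fields relative to a thread pointer.
  // In plain C++ the equivalence they encode is roughly:
  //
  //   JavaThread* t = ...;
  //   int* state_addr =
  //       (int*)((intptr_t)t + in_bytes(JavaThread::thread_state_offset()));
  //   // *state_addr aliases t's _thread_state field; an assembly stub
  //   // computes the same address as [thread_register + offset].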
  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }

#if INCLUDE_ALL_GCS
  static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }

  static ByteSize gc_state_offset()              { return byte_offset_of(JavaThread, _gc_state); }

#endif // INCLUDE_ALL_GCS

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }

  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
      thread_from_jni_env->block_if_vm_exited();
      return NULL;
    } else {
      return thread_from_jni_env;
    }
  }
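  // Illustrative sketch (not code from this file): the JNIEnv* handed to
  // native code is the address of the owning thread's _jni_environment field,
  // so the thread is recovered by undoing the field offset, as above:
  //
  //   JNIEnv* env = ...;   // as received by a JNI function
  //   JavaThread* t = JavaThread::thread_from_jni_environment(env);
  //   // non-NULL unless the thread is terminated; round-trips with:
  //   assert(t == NULL || env == t->jni_environment(), "inverse accessors");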
  // JNI critical regions. These can nest.
  bool in_critical()       { return _jni_active_critical > 0; }
  bool in_last_critical()  { return _jni_active_critical == 1; }
  void enter_critical() {
    assert(Thread::current() == this ||
           (Thread::current()->is_VM_thread() &&
           SafepointSynchronize::is_synchronizing()),
           "this must be current thread or synchronizing");
    _jni_active_critical++;
  }
  void exit_critical() {
    assert(Thread::current() == this, "this must be current thread");
    _jni_active_critical--;
    assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
  }

  // Checked JNI: is the programmer required to check for exceptions? If so,
  // which function name should be reported? Returning to a Java frame should
  // implicitly clear the pending check; this is done for Native->Java
  // transitions (i.e. user JNI code). VM->Java transitions are not cleared;
  // it is expected that JNI code enclosed within ThreadToNativeFromVM makes
  // proper exception checks (i.e. VM internal).
  bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
  void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
  const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
  void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }

  // For deadlock detection
  int depth_first_number() { return _depth_first_number; }
  void set_depth_first_number(int dfn) { _depth_first_number = dfn; }

 private:
  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }

 public:
  MonitorChunk* monitor_chunks() const { return _monitor_chunks; }
  void add_monitor_chunk(MonitorChunk* chunk);
  void remove_monitor_chunk(MonitorChunk* chunk);
  bool in_deopt_handler() const { return _in_deopt_handler > 0; }
  void inc_in_deopt_handler() { _in_deopt_handler++; }
  void dec_in_deopt_handler() {
    assert(_in_deopt_handler > 0, "mismatched deopt nesting");
    if (_in_deopt_handler > 0) { // robustness
      _in_deopt_handler--;
    }
  }

 private:
  void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }

 public:

  // Frame iteration; calls the function f for all frames on the stack
  void frames_do(void f(frame*, const RegisterMap*));

  // Memory operations
  void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Sweeper operations
  virtual void nmethods_do(CodeBlobClosure* cf);

  // RedefineClasses Support
  void metadata_do(void f(Metadata*));

  // Misc. operations
  char* name() const { return (char*)get_thread_name(); }
  void print_on(outputStream* st) const;
  void print_value();
  void print_thread_state_on(outputStream*) const PRODUCT_RETURN;
  void print_thread_state() const PRODUCT_RETURN;
  void print_on_error(outputStream* st, char* buf, int buflen) const;
  void print_name_on_error(outputStream* st, char* buf, int buflen) const;
  void verify();
  const char* get_thread_name() const;
 private:
  // factor out low-level mechanics for use in both normal and error cases
  const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
 public:
  const char* get_threadgroup_name() const;
  const char* get_parent_name() const;

  // Accessing frames
  frame last_frame() {
    _anchor.make_walkable(this);
    return pd_last_frame();
  }
  javaVFrame* last_java_vframe(RegisterMap* reg_map);

  // Returns method at 'depth' java or native frames down the stack
  // Used for security checks
  Klass* security_get_caller_class(int depth);

  // Print stack trace in external format
  void print_stack_on(outputStream* st);
  void print_stack() { print_stack_on(tty); }

  // Print stack traces in various internal formats
  void trace_stack()                      PRODUCT_RETURN;
  void trace_stack_from(vframe* start_vf) PRODUCT_RETURN;
  void trace_frames()                     PRODUCT_RETURN;
  void trace_oops()                       PRODUCT_RETURN;

  // Print an annotated view of the stack frames
  void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
  void validate_frame_layout() {
    print_frame_layout(0, true);
  }

  // Returns the number of stack frames on the stack
  int depth() const;
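  // Illustrative sketch (assumed usage, not code from this file): walking the
  // Java frames of a thread with the accessors above might look like:
  //
  //   RegisterMap reg_map(this, false);             // no need to update map
  //   javaVFrame* vf = last_java_vframe(&reg_map);  // youngest Java frame
  //   for (int d = 0; vf != NULL; vf = vf->java_sender(), d++) {
  //     // vf->method() / vf->bci() describe the frame at depth d
  //   }
  //
  // which is roughly what depth() and print_stack_on() do internally.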
  // Function for testing deoptimization
  void deoptimize();
  void make_zombies();

  void deoptimized_wrt_marked_nmethods();

 public:
  // Returns the running thread as a JavaThread
  static inline JavaThread* current();

  // Returns the active Java thread. Do not use this if you know you are calling
  // from a JavaThread, as it's slower than JavaThread::current. If called from
  // the VMThread, it also returns the JavaThread that instigated the VMThread's
  // operation. You may not want that either.
  static JavaThread* active();

  inline CompilerThread* as_CompilerThread();

 public:
  virtual void run();
  void thread_main_inner();

 private:
  // PRIVILEGED STACK
  PrivilegedElement*  _privileged_stack_top;
  GrowableArray<oop>* _array_for_gc;
 public:

  // Returns the privileged_stack information.
  PrivilegedElement* privileged_stack_top() const       { return _privileged_stack_top; }
  void set_privileged_stack_top(PrivilegedElement *e)   { _privileged_stack_top = e; }
  void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }

 public:
  // Thread local information maintained by JVMTI.
  void set_jvmti_thread_state(JvmtiThreadState *value)  { _jvmti_thread_state = value; }
  // A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
  // getter is used to get this JavaThread's JvmtiThreadState if it has
  // one, which means NULL can be returned. JvmtiThreadState::state_for()
  // is used to get the specified JavaThread's JvmtiThreadState if it has
  // one or it allocates a new JvmtiThreadState for the JavaThread and
  // returns it. JvmtiThreadState::state_for() will return NULL only if
  // the specified JavaThread is exiting.
  JvmtiThreadState *jvmti_thread_state() const          { return _jvmti_thread_state; }
  static ByteSize jvmti_thread_state_offset()           { return byte_offset_of(JavaThread, _jvmti_thread_state); }
  void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; }
  JvmtiGetLoadedClassesClosure* get_jvmti_get_loaded_classes_closure() const     { return _jvmti_get_loaded_classes_closure; }
  // JVMTI PopFrame support
  // Setting and clearing popframe_condition
  // All of these enumerated values are bits. popframe_pending
  // indicates that a PopFrame() has been requested and not yet been
  // completed. popframe_processing indicates that PopFrame() is in
  // the process of being completed. popframe_force_deopt_reexecution_bit
  // indicates that special handling is required when returning to a
  // deoptimized caller.
  enum PopCondition {
    popframe_inactive                    = 0x00,
    popframe_pending_bit                 = 0x01,
    popframe_processing_bit              = 0x02,
    popframe_force_deopt_reexecution_bit = 0x04
  };
  PopCondition popframe_condition()               { return (PopCondition) _popframe_condition; }
  void set_popframe_condition(PopCondition c)     { _popframe_condition = c; }
  void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; }
  void clear_popframe_condition()                 { _popframe_condition = popframe_inactive; }
  static ByteSize popframe_condition_offset()     { return byte_offset_of(JavaThread, _popframe_condition); }
  bool has_pending_popframe()                     { return (popframe_condition() & popframe_pending_bit) != 0; }
  bool popframe_forcing_deopt_reexecution()       { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
  void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; }
#ifdef CC_INTERP
  bool pop_frame_pending(void)                    { return ((_popframe_condition & popframe_pending_bit) != 0); }
  void clr_pop_frame_pending(void)                { _popframe_condition = popframe_inactive; }
  bool pop_frame_in_process(void)                 { return ((_popframe_condition & popframe_processing_bit) != 0); }
  void set_pop_frame_in_process(void)             { _popframe_condition |= popframe_processing_bit; }
  void clr_pop_frame_in_process(void)             { _popframe_condition &= ~popframe_processing_bit; }
#endif

  int frames_to_pop_failed_realloc() const        { return _frames_to_pop_failed_realloc; }
  void set_frames_to_pop_failed_realloc(int nb)   { _frames_to_pop_failed_realloc = nb; }
  void dec_frames_to_pop_failed_realloc()         { _frames_to_pop_failed_realloc--; }

 private:
  // Saved incoming arguments to popped frame.
  // Used only when popped interpreted frame returns to deoptimized frame.
  void* _popframe_preserved_args;
  int   _popframe_preserved_args_size;

 public:
  void popframe_preserve_args(ByteSize size_in_bytes, void* start);
  void* popframe_preserved_args();
  ByteSize popframe_preserved_args_size();
  WordSize popframe_preserved_args_size_in_words();
  void popframe_free_preserved_args();
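  // Illustrative sketch (not code from this file): the popframe state is a
  // small bit set, so requesting a PopFrame() and testing it amounts to:
  //
  //   set_popframe_condition_bit(popframe_pending_bit);    // request
  //   assert(has_pending_popframe(), "bit is now set");
  //   set_popframe_condition_bit(popframe_processing_bit); // while completing
  //   clear_popframe_condition();                          // back to inactive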
 private:
  JvmtiThreadState *_jvmti_thread_state;
  JvmtiGetLoadedClassesClosure* _jvmti_get_loaded_classes_closure;

  // Used by the interpreter in fullspeed mode for frame pop, method
  // entry, method exit and single stepping support. This field is
  // only set to non-zero by the VM_EnterInterpOnlyMode VM operation.
  // It can be set to zero asynchronously (i.e., without a VM operation
  // or a lock) so we have to be very careful.
  int _interp_only_mode;

 public:
  // used by the interpreter for fullspeed debugging support (see above)
  static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
  bool is_interp_only_mode()                { return (_interp_only_mode != 0); }
  int get_interp_only_mode()                { return _interp_only_mode; }
  void increment_interp_only_mode()         { ++_interp_only_mode; }
  void decrement_interp_only_mode()         { --_interp_only_mode; }

  // support for cached flag that indicates whether exceptions need to be posted for this thread
  // if this is false, we can avoid deoptimizing when events are thrown
  // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything
 private:
  int _should_post_on_exceptions_flag;

 public:
  int should_post_on_exceptions_flag()              { return _should_post_on_exceptions_flag; }
  void set_should_post_on_exceptions_flag(int val)  { _should_post_on_exceptions_flag = val; }

 private:
  ThreadStatistics *_thread_stat;

 public:
  ThreadStatistics* get_thread_stat() const { return _thread_stat; }

  // Return a blocker object for which this thread is blocked parking.
  oop current_park_blocker();

 private:
  static size_t _stack_size_at_create;

 public:
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

#if INCLUDE_ALL_GCS
  // SATB marking queue support
  SATBMarkQueue& satb_mark_queue() { return _satb_mark_queue; }
  static SATBMarkQueueSet& satb_mark_queue_set() {
    return _satb_mark_queue_set;
  }

  // Dirty card queue support
  DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
  static DirtyCardQueueSet& dirty_card_queue_set() {
    return _dirty_card_queue_set;
  }

  inline char gc_state() const;

 private:
  void set_gc_state(char in_prog);

 public:
  static void set_gc_state_all_threads(char in_prog);

#endif // INCLUDE_ALL_GCS
  // This method initializes the SATB and dirty card queues before a
  // JavaThread is added to the Java thread list. Right now, we don't
  // have to do anything to the dirty card queue (it should have been
  // activated when the thread was created), but we have to activate
  // the SATB queue if the thread is created while a marking cycle is
  // in progress. The activation / de-activation of the SATB queues at
  // the beginning / end of a marking cycle is done during safepoints
  // so we have to make sure this method is called outside one to be
  // able to safely read the active field of the SATB queue set. Right
  // now, it is called just before the thread is added to the Java
  // thread list in the Threads::add() method. That method is holding
  // the Threads_lock which ensures we are outside a safepoint. We
  // cannot do the obvious and set the active field of the SATB queue
  // when the thread is created given that, in some cases, safepoints
  // might happen between the JavaThread constructor being called and the
  // thread being added to the Java thread list (an example of this is
  // when the structure for the DestroyJavaVM thread is created).
#if INCLUDE_ALL_GCS
  void initialize_queues();
#else  // INCLUDE_ALL_GCS
  void initialize_queues() { }
#endif // INCLUDE_ALL_GCS

  // Machine dependent stuff
#include OS_CPU_HEADER(thread)

 public:
  void set_blocked_on_compilation(bool value) {
    _blocked_on_compilation = value;
  }

  bool blocked_on_compilation() {
    return _blocked_on_compilation;
  }
 protected:
  bool _blocked_on_compilation;


  // JSR166 per-thread parker
 private:
  Parker* _parker;
 public:
  Parker* parker() { return _parker; }

  // Biased locking support
 private:
  GrowableArray<MonitorInfo*>* _cached_monitor_info;
 public:
  GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
  void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }

  // clearing/querying jni attach status
  bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
  bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
  inline void set_done_attaching_via_jni();
};

// Inline implementation of JavaThread::current
inline JavaThread* JavaThread::current() {
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "just checking");
  return (JavaThread*)thread;
}

inline CompilerThread* JavaThread::as_CompilerThread() {
  assert(is_Compiler_thread(), "just checking");
  return (CompilerThread*)this;
}

// Dedicated thread to sweep the code cache
class CodeCacheSweeperThread : public JavaThread {
  CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper
 public:
  CodeCacheSweeperThread();
  // Track the nmethod currently being scanned by the sweeper
  void set_scanned_compiled_method(CompiledMethod* cm) {
    assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
    _scanned_compiled_method = cm;
  }

  // Hide sweeper thread from external view.
  bool is_hidden_from_external_view() const { return true; }

  bool is_Code_cache_sweeper_thread() const { return true; }

  // Prevent GC from unloading _scanned_compiled_method
  void oops_do(OopClosure* f, CodeBlobClosure* cf);
  void nmethods_do(CodeBlobClosure* cf);
};

// A thread used for Compilation.
class CompilerThread : public JavaThread {
  friend class VMStructs;
 private:
  CompilerCounters* _counters;

  ciEnv*                _env;
  CompileLog*           _log;
  CompileTask* volatile _task;  // print_threads_compiling can read this concurrently.
  CompileQueue*         _queue;
  BufferBlob*           _buffer_blob;

  AbstractCompiler*     _compiler;

 public:

  static CompilerThread* current();

  CompilerThread(CompileQueue* queue, CompilerCounters* counters);

  bool is_Compiler_thread() const { return true; }

  virtual bool can_call_java() const;
  // Hide native compiler threads from external view.
  bool is_hidden_from_external_view() const { return !can_call_java(); }

  void set_compiler(AbstractCompiler* c)    { _compiler = c; }
  AbstractCompiler* compiler() const        { return _compiler; }

  CompileQueue* queue() const               { return _queue; }
  CompilerCounters* counters() const        { return _counters; }

  // Get/set the thread's compilation environment.
  ciEnv* env()                              { return _env; }
  void set_env(ciEnv* env)                  { _env = env; }

  BufferBlob* get_buffer_blob() const       { return _buffer_blob; }
  void set_buffer_blob(BufferBlob* b)       { _buffer_blob = b; }

  // Get/set the thread's logging information
  CompileLog* log()                         { return _log; }
  void init_log(CompileLog* log) {
    // Set once, for good.
    assert(_log == NULL, "set only once");
    _log = log;
  }

#ifndef PRODUCT
 private:
  IdealGraphPrinter *_ideal_graph_printer;
 public:
  IdealGraphPrinter *ideal_graph_printer()           { return _ideal_graph_printer; }
  void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
#endif

  // Get/set the thread's current task
  CompileTask* task()                       { return _task; }
  void set_task(CompileTask* task)          { _task = task; }
};

inline CompilerThread* CompilerThread::current() {
  return JavaThread::current()->as_CompilerThread();
}

// The active thread queue. It also keeps track of the currently used
// thread priorities.
class Threads: AllStatic {
  friend class VMStructs;
 private:
  static JavaThread* _thread_list;
  static int         _number_of_threads;
  static int         _number_of_non_daemon_threads;
  static int         _return_code;
  static int         _thread_claim_parity;
#ifdef ASSERT
  static bool        _vm_complete;
#endif

  static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
  static void initialize_jsr292_core_classes(TRAPS);

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p);
  static void threads_do(ThreadClosure* tc);
  static void java_threads_do(ThreadClosure* tc);
  static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc);

  // Initializes the vm and creates the vm thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // The "thread claim parity" provides a way for threads to be claimed
  // by parallel worker tasks.
  //
  // Each thread contains a "parity" field. A task will claim the
  // thread only if its parity field is the same as the global parity,
  // which is updated by calling change_thread_claim_parity().
  //
  // For this to work change_thread_claim_parity() needs to be called
  // exactly once in sequential code before starting parallel tasks
  // that should claim threads.
  //
  // New threads get their parity set to 0 and change_thread_claim_parity()
  // never sets the global parity to 0.
  static int thread_claim_parity() { return _thread_claim_parity; }
  static void change_thread_claim_parity();
  static void assert_all_threads_claimed() NOT_DEBUG_RETURN;
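  // Illustrative sketch (assumed usage, not code from this file): a GC pause
  // that walks thread roots in parallel would follow the protocol above:
  //
  //   Threads::change_thread_claim_parity();  // sequential: flip global parity
  //   // ... spawn workers; each worker then runs:
  //   Threads::possibly_parallel_oops_do(true /* is_par */, &oop_cl, &blob_cl);
  //   // a worker claims a thread by advancing that thread's parity field to
  //   // the new global value, so each thread's roots are scanned exactly once
  //   Threads::assert_all_threads_claimed();  // debug-only sanity check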
  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f, CodeBlobClosure* cf);
  // This version may be called by sequential or parallel code.
  static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf, CodeBlobClosure* nmethods_cl = NULL, ThreadClosure* thread_cl = NULL);
  // This creates a list of GCTasks, one per thread.
  static void create_thread_roots_tasks(GCTaskQueue* q);
  // This creates a list of GCTasks, one per thread, for marking objects.
  static void create_thread_roots_marking_tasks(GCTaskQueue* q);

  // Apply "f->do_oop" to roots in all threads that
  // are part of compiled frames
  static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf);

  static void convert_hcode_pointers();
  static void restore_hcode_pointers();

  // Sweeper
  static void nmethods_do(CodeBlobClosure* cf);

  // RedefineClasses support
  static void metadata_do(void f(Metadata*));
  static void metadata_handles_do(void f(Metadata*));

#ifdef ASSERT
  static bool is_vm_complete() { return _vm_complete; }
#endif

  // Verification
  static void verify();
  static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks);
  static void print(bool print_stacks, bool internal_format) {
    // this function is only used by debug.cpp
    print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */);
  }
  static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);
  static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
                             int buflen, bool* found_current);
  static void print_threads_compiling(outputStream* st, char* buf, int buflen);

  // Get Java threads that are waiting to enter a monitor.
  static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list,
                                                         int count, address monitor);

  // Get owning Java thread from the monitor's owner field.
  static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list,
                                                      address owner);

  // Number of threads on the active threads list
  static int number_of_threads()            { return _number_of_threads; }
  // Number of non-daemon threads on the active threads list
  static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; }

  // Deoptimizes all frames tied to marked nmethods
  static void deoptimized_wrt_marked_nmethods();
};


// Thread iterator
class ThreadClosure: public StackObj {
 public:
  virtual void do_thread(Thread* thread) = 0;
};

class SignalHandlerMark: public StackObj {
 private:
  Thread* _thread;
 public:
  SignalHandlerMark(Thread* t) {
    _thread = t;
    if (_thread) _thread->enter_signal_handler();
  }
  ~SignalHandlerMark() {
    if (_thread) _thread->leave_signal_handler();
    _thread = NULL;
  }
};


#endif // SHARE_VM_RUNTIME_THREAD_HPP
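// Illustrative sketch (assumption, not code from this file): SignalHandlerMark
// above is an RAII guard meant to bracket VM signal-handler execution, e.g.:
//
//   static void vm_signal_handler(int sig, siginfo_t* info, void* ucontext) {
//     Thread* t = Thread::current_or_null_safe(); // may be NULL in a foreign thread
//     SignalHandlerMark shm(t);                   // enter_signal_handler() on construction
//     // ... handle the signal; leave_signal_handler() runs on scope exit
//   }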