1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_THREAD_HPP 26 #define SHARE_VM_RUNTIME_THREAD_HPP 27 28 #include "gc/shared/threadLocalAllocBuffer.hpp" 29 #include "memory/allocation.hpp" 30 #include "oops/oop.hpp" 31 #include "prims/jni.h" 32 #include "prims/jvmtiExport.hpp" 33 #include "runtime/frame.hpp" 34 #include "runtime/javaFrameAnchor.hpp" 35 #include "runtime/jniHandles.hpp" 36 #include "runtime/mutexLocker.hpp" 37 #include "runtime/os.hpp" 38 #include "runtime/osThread.hpp" 39 #include "runtime/park.hpp" 40 #include "runtime/safepoint.hpp" 41 #include "runtime/stubRoutines.hpp" 42 #include "runtime/threadLocalStorage.hpp" 43 #include "runtime/thread_ext.hpp" 44 #include "runtime/unhandledOops.hpp" 45 #include "trace/traceBackend.hpp" 46 #include "trace/traceMacros.hpp" 47 #include "utilities/align.hpp" 48 #include "utilities/exceptions.hpp" 49 #include "utilities/macros.hpp" 50 #if INCLUDE_ALL_GCS 51 #include "gc/g1/dirtyCardQueue.hpp" 52 #include "gc/g1/satbMarkQueue.hpp" 53 #endif // INCLUDE_ALL_GCS 54 #ifdef ZERO 55 # include "stack_zero.hpp" 56 #endif 57 58 class ThreadSafepointState; 59 class ThreadProfiler; 60 61 class JvmtiThreadState; 62 class JvmtiGetLoadedClassesClosure; 63 class ThreadStatistics; 64 class ConcurrentLocksDump; 65 class ParkEvent; 66 class Parker; 67 68 class ciEnv; 69 class CompileThread; 70 class CompileLog; 71 class CompileTask; 72 class CompileQueue; 73 class CompilerCounters; 74 class vframeArray; 75 76 class DeoptResourceMark; 77 class jvmtiDeferredLocalVariableSet; 78 79 class GCTaskQueue; 80 class ThreadClosure; 81 class IdealGraphPrinter; 82 83 class Metadata; 84 template <class T, MEMFLAGS F> class ChunkedList; 85 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer; 86 87 DEBUG_ONLY(class ResourceMark;) 88 89 class WorkerThread; 90 91 // Class hierarchy 92 // - Thread 93 // - NamedThread 94 // - VMThread 95 // - ConcurrentGCThread 96 // - WorkerThread 97 // - GangWorker 98 // - GCTaskThread 99 // - JavaThread 100 // - various subclasses eg CompilerThread, ServiceThread 101 // - WatcherThread 102 103 class Thread: public ThreadShadow { 104 friend class VMStructs; 105 friend class JVMCIVMStructs; 106 private: 107 108 #ifndef USE_LIBRARY_BASED_TLS_ONLY 109 // Current thread is maintained as a thread-local variable 110 static THREAD_LOCAL_DECL Thread* _thr_current; 111 #endif 112 113 // Exception handling 114 // (Note: _pending_exception and friends are in ThreadShadow) 115 //oop _pending_exception; // pending exception for current thread 116 // const char* _exception_file; // file information for exception (debugging only) 117 // int _exception_line; // line information for exception (debugging only) 118 protected: 119 // Support for forcing alignment of thread objects for biased locking 120 void* _real_malloc_address; 121 public: 122 void* operator new(size_t size) throw() { return allocate(size, true); } 123 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { 124 return allocate(size, false); } 125 void operator delete(void* p); 126 127 protected: 128 static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread); 129 private: 130 131 // *************************************************************** 132 // Suspend and resume support 133 // *************************************************************** 134 // 135 // VM suspend/resume no longer exists - it was once used for various 136 // things including safepoints but was deprecated and finally removed 137 // in Java 7. Because VM suspension was considered "internal" Java-level 138 // suspension was considered "external", and this legacy naming scheme 139 // remains. 140 // 141 // External suspend/resume requests come from JVM_SuspendThread, 142 // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI 143 // ResumeThread. External 144 // suspend requests cause _external_suspend to be set and external 145 // resume requests cause _external_suspend to be cleared. 146 // External suspend requests do not nest on top of other external 147 // suspend requests. The higher level APIs reject suspend requests 148 // for already suspended threads. 149 // 150 // The external_suspend 151 // flag is checked by has_special_runtime_exit_condition() and java thread 152 // will self-suspend when handle_special_runtime_exit_condition() is 153 // called. Most uses of the _thread_blocked state in JavaThreads are 154 // considered the same as being externally suspended; if the blocking 155 // condition lifts, the JavaThread will self-suspend. Other places 156 // where VM checks for external_suspend include: 157 // + mutex granting (do not enter monitors when thread is suspended) 158 // + state transitions from _thread_in_native 159 // 160 // In general, java_suspend() does not wait for an external suspend 161 // request to complete. When it returns, the only guarantee is that 162 // the _external_suspend field is true. 163 // 164 // wait_for_ext_suspend_completion() is used to wait for an external 165 // suspend request to complete. External suspend requests are usually 166 // followed by some other interface call that requires the thread to 167 // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into 168 // the interface that requires quiescence, we give the JavaThread a 169 // chance to self-suspend before we need it to be quiescent. This 170 // improves overall suspend/query performance. 171 // 172 // _suspend_flags controls the behavior of java_ suspend/resume. 173 // It must be set under the protection of SR_lock. Read from the flag is 174 // OK without SR_lock as long as the value is only used as a hint. 175 // (e.g., check _external_suspend first without lock and then recheck 176 // inside SR_lock and finish the suspension) 177 // 178 // _suspend_flags is also overloaded for other "special conditions" so 179 // that a single check indicates whether any special action is needed 180 // eg. for async exceptions. 181 // ------------------------------------------------------------------- 182 // Notes: 183 // 1. The suspend/resume logic no longer uses ThreadState in OSThread 184 // but we still update its value to keep other part of the system (mainly 185 // JVMTI) happy. ThreadState is legacy code (see notes in 186 // osThread.hpp). 187 // 188 // 2. It would be more natural if set_external_suspend() is private and 189 // part of java_suspend(), but that probably would affect the suspend/query 190 // performance. Need more investigation on this. 191 192 // suspend/resume lock: used for self-suspend 193 Monitor* _SR_lock; 194 195 protected: 196 enum SuspendFlags { 197 // NOTE: avoid using the sign-bit as cc generates different test code 198 // when the sign-bit is used, and sometimes incorrectly - see CR 6398077 199 200 _external_suspend = 0x20000000U, // thread is asked to self suspend 201 _ext_suspended = 0x40000000U, // thread has self-suspended 202 _deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt 203 204 _has_async_exception = 0x00000001U, // there is a pending async exception 205 _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock 206 207 _trace_flag = 0x00000004U // call tracing backend 208 }; 209 210 // various suspension related flags - atomically updated 211 // overloaded for async exception checking in check_special_condition_for_native_trans. 212 volatile uint32_t _suspend_flags; 213 214 private: 215 int _num_nested_signal; 216 217 DEBUG_ONLY(bool _suspendible_thread;) 218 219 public: 220 void enter_signal_handler() { _num_nested_signal++; } 221 void leave_signal_handler() { _num_nested_signal--; } 222 bool is_inside_signal_handler() const { return _num_nested_signal > 0; } 223 224 #ifdef ASSERT 225 void set_suspendible_thread() { 226 _suspendible_thread = true; 227 } 228 229 void clear_suspendible_thread() { 230 _suspendible_thread = false; 231 } 232 233 bool is_suspendible_thread() { return _suspendible_thread; } 234 #endif 235 236 private: 237 // Active_handles points to a block of handles 238 JNIHandleBlock* _active_handles; 239 240 // One-element thread local free list 241 JNIHandleBlock* _free_handle_block; 242 243 // Point to the last handle mark 244 HandleMark* _last_handle_mark; 245 246 // The parity of the last strong_roots iteration in which this thread was 247 // claimed as a task. 248 jint _oops_do_parity; 249 250 public: 251 void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; } 252 HandleMark* last_handle_mark() const { return _last_handle_mark; } 253 private: 254 255 // debug support for checking if code does allow safepoints or not 256 // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on 257 // mutex, or blocking on an object synchronizer (Java locking). 258 // If !allow_safepoint(), then an assertion failure will happen in any of the above cases 259 // If !allow_allocation(), then an assertion failure will happen during allocation 260 // (Hence, !allow_safepoint() => !allow_allocation()). 261 // 262 // The two classes NoSafepointVerifier and No_Allocation_Verifier are used to set these counters. 263 // 264 NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen 265 debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops. 266 267 // Used by SkipGCALot class. 268 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot? 269 270 friend class NoAllocVerifier; 271 friend class NoSafepointVerifier; 272 friend class PauseNoSafepointVerifier; 273 friend class GCLocker; 274 275 ThreadLocalAllocBuffer _tlab; // Thread-local eden 276 jlong _allocated_bytes; // Cumulative number of bytes allocated on 277 // the Java heap 278 279 mutable TRACE_DATA _trace_data; // Thread-local data for tracing 280 281 ThreadExt _ext; 282 283 int _vm_operation_started_count; // VM_Operation support 284 int _vm_operation_completed_count; // VM_Operation support 285 286 ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread 287 // is waiting to lock 288 bool _current_pending_monitor_is_from_java; // locking is from Java code 289 290 // ObjectMonitor on which this thread called Object.wait() 291 ObjectMonitor* _current_waiting_monitor; 292 293 // Private thread-local objectmonitor list - a simple cache organized as a SLL. 294 public: 295 ObjectMonitor* omFreeList; 296 int omFreeCount; // length of omFreeList 297 int omFreeProvision; // reload chunk size 298 ObjectMonitor* omInUseList; // SLL to track monitors in circulation 299 int omInUseCount; // length of omInUseList 300 301 #ifdef ASSERT 302 private: 303 bool _visited_for_critical_count; 304 305 public: 306 void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; } 307 bool was_visited_for_critical_count() const { return _visited_for_critical_count; } 308 #endif 309 310 public: 311 enum { 312 is_definitely_current_thread = true 313 }; 314 315 // Constructor 316 Thread(); 317 virtual ~Thread(); 318 319 // Manage Thread::current() 320 void initialize_thread_current(); 321 void clear_thread_current(); // TLS cleanup needed before threads terminate 322 323 public: 324 // thread entry point 325 virtual void run(); 326 327 // Testers 328 virtual bool is_VM_thread() const { return false; } 329 virtual bool is_Java_thread() const { return false; } 330 virtual bool is_Compiler_thread() const { return false; } 331 virtual bool is_Code_cache_sweeper_thread() const { return false; } 332 virtual bool is_hidden_from_external_view() const { return false; } 333 virtual bool is_jvmti_agent_thread() const { return false; } 334 // True iff the thread can perform GC operations at a safepoint. 335 // Generally will be true only of VM thread and parallel GC WorkGang 336 // threads. 337 virtual bool is_GC_task_thread() const { return false; } 338 virtual bool is_Watcher_thread() const { return false; } 339 virtual bool is_ConcurrentGC_thread() const { return false; } 340 virtual bool is_Named_thread() const { return false; } 341 virtual bool is_Worker_thread() const { return false; } 342 343 // Can this thread make Java upcalls 344 virtual bool can_call_java() const { return false; } 345 346 // Casts 347 virtual WorkerThread* as_Worker_thread() const { return NULL; } 348 349 virtual char* name() const { return (char*)"Unknown thread"; } 350 351 // Returns the current thread (ASSERTS if NULL) 352 static inline Thread* current(); 353 // Returns the current thread, or NULL if not attached 354 static inline Thread* current_or_null(); 355 // Returns the current thread, or NULL if not attached, and is 356 // safe for use from signal-handlers 357 static inline Thread* current_or_null_safe(); 358 359 // Common thread operations 360 static void set_priority(Thread* thread, ThreadPriority priority); 361 static ThreadPriority get_priority(const Thread* const thread); 362 static void start(Thread* thread); 363 static void interrupt(Thread* thr); 364 static bool is_interrupted(Thread* thr, bool clear_interrupted); 365 366 void set_native_thread_name(const char *name) { 367 assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread"); 368 os::set_native_thread_name(name); 369 } 370 371 ObjectMonitor** omInUseList_addr() { return (ObjectMonitor **)&omInUseList; } 372 Monitor* SR_lock() const { return _SR_lock; } 373 374 bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; } 375 376 inline void set_suspend_flag(SuspendFlags f); 377 inline void clear_suspend_flag(SuspendFlags f); 378 379 inline void set_has_async_exception(); 380 inline void clear_has_async_exception(); 381 382 bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; } 383 384 inline void set_critical_native_unlock(); 385 inline void clear_critical_native_unlock(); 386 387 inline void set_trace_flag(); 388 inline void clear_trace_flag(); 389 390 // Support for Unhandled Oop detection 391 // Add the field for both, fastdebug and debug, builds to keep 392 // Thread's fields layout the same. 393 // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug build. 394 #ifdef CHECK_UNHANDLED_OOPS 395 private: 396 UnhandledOops* _unhandled_oops; 397 #elif defined(ASSERT) 398 private: 399 void* _unhandled_oops; 400 #endif 401 #ifdef CHECK_UNHANDLED_OOPS 402 public: 403 UnhandledOops* unhandled_oops() { return _unhandled_oops; } 404 // Mark oop safe for gc. It may be stack allocated but won't move. 405 void allow_unhandled_oop(oop *op) { 406 if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op); 407 } 408 // Clear oops at safepoint so crashes point to unhandled oop violator 409 void clear_unhandled_oops() { 410 if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops(); 411 } 412 #endif // CHECK_UNHANDLED_OOPS 413 414 public: 415 #ifndef PRODUCT 416 bool skip_gcalot() { return _skip_gcalot; } 417 void set_skip_gcalot(bool v) { _skip_gcalot = v; } 418 #endif 419 420 // Installs a pending exception to be inserted later 421 static void send_async_exception(oop thread_oop, oop java_throwable); 422 423 // Resource area 424 ResourceArea* resource_area() const { return _resource_area; } 425 void set_resource_area(ResourceArea* area) { _resource_area = area; } 426 427 OSThread* osthread() const { return _osthread; } 428 void set_osthread(OSThread* thread) { _osthread = thread; } 429 430 // JNI handle support 431 JNIHandleBlock* active_handles() const { return _active_handles; } 432 void set_active_handles(JNIHandleBlock* block) { _active_handles = block; } 433 JNIHandleBlock* free_handle_block() const { return _free_handle_block; } 434 void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; } 435 436 // Internal handle support 437 HandleArea* handle_area() const { return _handle_area; } 438 void set_handle_area(HandleArea* area) { _handle_area = area; } 439 440 GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; } 441 void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; } 442 443 // Thread-Local Allocation Buffer (TLAB) support 444 ThreadLocalAllocBuffer& tlab() { return _tlab; } 445 void initialize_tlab() { 446 if (UseTLAB) { 447 tlab().initialize(); 448 } 449 } 450 451 jlong allocated_bytes() { return _allocated_bytes; } 452 void set_allocated_bytes(jlong value) { _allocated_bytes = value; } 453 void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } 454 inline jlong cooked_allocated_bytes(); 455 456 TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET; 457 TRACE_DATA* trace_data() const { return &_trace_data; } 458 bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; } 459 460 const ThreadExt& ext() const { return _ext; } 461 ThreadExt& ext() { return _ext; } 462 463 // VM operation support 464 int vm_operation_ticket() { return ++_vm_operation_started_count; } 465 int vm_operation_completed_count() { return _vm_operation_completed_count; } 466 void increment_vm_operation_completed_count() { _vm_operation_completed_count++; } 467 468 // For tracking the heavyweight monitor the thread is pending on. 469 ObjectMonitor* current_pending_monitor() { 470 return _current_pending_monitor; 471 } 472 void set_current_pending_monitor(ObjectMonitor* monitor) { 473 _current_pending_monitor = monitor; 474 } 475 void set_current_pending_monitor_is_from_java(bool from_java) { 476 _current_pending_monitor_is_from_java = from_java; 477 } 478 bool current_pending_monitor_is_from_java() { 479 return _current_pending_monitor_is_from_java; 480 } 481 482 // For tracking the ObjectMonitor on which this thread called Object.wait() 483 ObjectMonitor* current_waiting_monitor() { 484 return _current_waiting_monitor; 485 } 486 void set_current_waiting_monitor(ObjectMonitor* monitor) { 487 _current_waiting_monitor = monitor; 488 } 489 490 // GC support 491 // Apply "f->do_oop" to all root oops in "this". 492 // Used by JavaThread::oops_do. 493 // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames 494 virtual void oops_do(OopClosure* f, CodeBlobClosure* cf); 495 496 // Handles the parallel case for the method below. 497 private: 498 bool claim_oops_do_par_case(int collection_parity); 499 public: 500 // Requires that "collection_parity" is that of the current roots 501 // iteration. If "is_par" is false, sets the parity of "this" to 502 // "collection_parity", and returns "true". If "is_par" is true, 503 // uses an atomic instruction to set the current threads parity to 504 // "collection_parity", if it is not already. Returns "true" iff the 505 // calling thread does the update, this indicates that the calling thread 506 // has claimed the thread's stack as a root groop in the current 507 // collection. 508 bool claim_oops_do(bool is_par, int collection_parity) { 509 if (!is_par) { 510 _oops_do_parity = collection_parity; 511 return true; 512 } else { 513 return claim_oops_do_par_case(collection_parity); 514 } 515 } 516 517 // jvmtiRedefineClasses support 518 void metadata_handles_do(void f(Metadata*)); 519 520 // Used by fast lock support 521 virtual bool is_lock_owned(address adr) const; 522 523 // Check if address is in the stack of the thread (not just for locks). 524 // Warning: the method can only be used on the running thread 525 bool is_in_stack(address adr) const; 526 // Check if address is in the usable part of the stack (excludes protected 527 // guard pages) 528 bool is_in_usable_stack(address adr) const; 529 530 // Sets this thread as starting thread. Returns failure if thread 531 // creation fails due to lack of memory, too many threads etc. 532 bool set_as_starting_thread(); 533 534 protected: 535 // OS data associated with the thread 536 OSThread* _osthread; // Platform-specific thread information 537 538 // Thread local resource area for temporary allocation within the VM 539 ResourceArea* _resource_area; 540 541 DEBUG_ONLY(ResourceMark* _current_resource_mark;) 542 543 // Thread local handle area for allocation of handles within the VM 544 HandleArea* _handle_area; 545 GrowableArray<Metadata*>* _metadata_handles; 546 547 // Support for stack overflow handling, get_thread, etc. 548 address _stack_base; 549 size_t _stack_size; 550 uintptr_t _self_raw_id; // used by get_thread (mutable) 551 int _lgrp_id; 552 553 public: 554 // Stack overflow support 555 address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } 556 void set_stack_base(address base) { _stack_base = base; } 557 size_t stack_size() const { return _stack_size; } 558 void set_stack_size(size_t size) { _stack_size = size; } 559 address stack_end() const { return stack_base() - stack_size(); } 560 void record_stack_base_and_size(); 561 562 bool on_local_stack(address adr) const { 563 // QQQ this has knowledge of direction, ought to be a stack method 564 return (_stack_base >= adr && adr >= stack_end()); 565 } 566 567 uintptr_t self_raw_id() { return _self_raw_id; } 568 void set_self_raw_id(uintptr_t value) { _self_raw_id = value; } 569 570 int lgrp_id() const { return _lgrp_id; } 571 void set_lgrp_id(int value) { _lgrp_id = value; } 572 573 // Printing 574 virtual void print_on(outputStream* st) const; 575 void print() const { print_on(tty); } 576 virtual void print_on_error(outputStream* st, char* buf, int buflen) const; 577 578 // Debug-only code 579 #ifdef ASSERT 580 private: 581 // Deadlock detection support for Mutex locks. List of locks own by thread. 582 Monitor* _owned_locks; 583 // Mutex::set_owner_implementation is the only place where _owned_locks is modified, 584 // thus the friendship 585 friend class Mutex; 586 friend class Monitor; 587 588 public: 589 void print_owned_locks_on(outputStream* st) const; 590 void print_owned_locks() const { print_owned_locks_on(tty); } 591 Monitor* owned_locks() const { return _owned_locks; } 592 bool owns_locks() const { return owned_locks() != NULL; } 593 bool owns_locks_but_compiled_lock() const; 594 int oops_do_parity() const { return _oops_do_parity; } 595 596 // Deadlock detection 597 bool allow_allocation() { return _allow_allocation_count == 0; } 598 ResourceMark* current_resource_mark() { return _current_resource_mark; } 599 void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; } 600 #endif 601 602 void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN; 603 604 private: 605 volatile int _jvmti_env_iteration_count; 606 607 public: 608 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; } 609 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; } 610 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; } 611 612 // Code generation 613 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); } 614 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); } 615 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); } 616 617 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); } 618 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); } 619 620 #define TLAB_FIELD_OFFSET(name) \ 621 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); } 622 623 TLAB_FIELD_OFFSET(start) 624 TLAB_FIELD_OFFSET(end) 625 TLAB_FIELD_OFFSET(top) 626 TLAB_FIELD_OFFSET(pf_top) 627 TLAB_FIELD_OFFSET(size) // desired_size 628 TLAB_FIELD_OFFSET(refill_waste_limit) 629 TLAB_FIELD_OFFSET(number_of_refills) 630 TLAB_FIELD_OFFSET(fast_refill_waste) 631 TLAB_FIELD_OFFSET(slow_allocations) 632 633 #undef TLAB_FIELD_OFFSET 634 635 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); } 636 637 public: 638 volatile intptr_t _Stalled; 639 volatile int _TypeTag; 640 ParkEvent * _ParkEvent; // for synchronized() 641 ParkEvent * _SleepEvent; // for Thread.sleep 642 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor 643 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease 644 int NativeSyncRecursion; // diagnostic 645 646 volatile int _OnTrap; // Resume-at IP delta 647 jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG 648 jint _hashStateX; // thread-specific hashCode generator state 649 jint _hashStateY; 650 jint _hashStateZ; 651 void * _schedctl; 652 653 654 volatile jint rng[4]; // RNG for spin loop 655 656 // Low-level leaf-lock primitives used to implement synchronization 657 // and native monitor-mutex infrastructure. 658 // Not for general synchronization use. 659 static void SpinAcquire(volatile int * Lock, const char * Name); 660 static void SpinRelease(volatile int * Lock); 661 static void muxAcquire(volatile intptr_t * Lock, const char * Name); 662 static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev); 663 static void muxRelease(volatile intptr_t * Lock); 664 }; 665 666 // Inline implementation of Thread::current() 667 inline Thread* Thread::current() { 668 Thread* current = current_or_null(); 669 assert(current != NULL, "Thread::current() called on detached thread"); 670 return current; 671 } 672 673 inline Thread* Thread::current_or_null() { 674 #ifndef USE_LIBRARY_BASED_TLS_ONLY 675 return _thr_current; 676 #else 677 return ThreadLocalStorage::thread(); 678 #endif 679 } 680 681 inline Thread* Thread::current_or_null_safe() { 682 return ThreadLocalStorage::thread(); 683 } 684 685 // Name support for threads. non-JavaThread subclasses with multiple 686 // uniquely named instances should derive from this. 687 class NamedThread: public Thread { 688 friend class VMStructs; 689 enum { 690 max_name_len = 64 691 }; 692 private: 693 char* _name; 694 // log JavaThread being processed by oops_do 695 JavaThread* _processed_thread; 696 uint _gc_id; // The current GC id when a thread takes part in GC 697 698 public: 699 NamedThread(); 700 ~NamedThread(); 701 // May only be called once per thread. 702 void set_name(const char* format, ...) ATTRIBUTE_PRINTF(2, 3); 703 void initialize_named_thread(); 704 virtual bool is_Named_thread() const { return true; } 705 virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; } 706 JavaThread *processed_thread() { return _processed_thread; } 707 void set_processed_thread(JavaThread *thread) { _processed_thread = thread; } 708 virtual void print_on(outputStream* st) const; 709 710 void set_gc_id(uint gc_id) { _gc_id = gc_id; } 711 uint gc_id() { return _gc_id; } 712 }; 713 714 // Worker threads are named and have an id of an assigned work. 715 class WorkerThread: public NamedThread { 716 private: 717 uint _id; 718 public: 719 WorkerThread() : _id(0) { } 720 virtual bool is_Worker_thread() const { return true; } 721 722 virtual WorkerThread* as_Worker_thread() const { 723 assert(is_Worker_thread(), "Dubious cast to WorkerThread*?"); 724 return (WorkerThread*) this; 725 } 726 727 void set_id(uint work_id) { _id = work_id; } 728 uint id() const { return _id; } 729 }; 730 731 // A single WatcherThread is used for simulating timer interrupts. 732 class WatcherThread: public Thread { 733 friend class VMStructs; 734 public: 735 virtual void run(); 736 737 private: 738 static WatcherThread* _watcher_thread; 739 740 static bool _startable; 741 // volatile due to at least one lock-free read 742 volatile static bool _should_terminate; 743 public: 744 enum SomeConstants { 745 delay_interval = 10 // interrupt delay in milliseconds 746 }; 747 748 // Constructor 749 WatcherThread(); 750 751 // No destruction allowed 752 ~WatcherThread() { 753 guarantee(false, "WatcherThread deletion must fix the race with VM termination"); 754 } 755 756 // Tester 757 bool is_Watcher_thread() const { return true; } 758 759 // Printing 760 char* name() const { return (char*)"VM Periodic Task Thread"; } 761 void print_on(outputStream* st) const; 762 void unpark(); 763 764 // Returns the single instance of WatcherThread 765 static WatcherThread* watcher_thread() { return _watcher_thread; } 766 767 // Create and start the single instance of WatcherThread, or stop it on shutdown 768 static void start(); 769 static void stop(); 770 // Only allow start once the VM is sufficiently initialized 771 // Otherwise the first task to enroll will trigger the start 772 static void make_startable(); 773 private: 774 int sleep() const; 775 }; 776 777 778 class CompilerThread; 779 780 typedef void (*ThreadFunction)(JavaThread*, TRAPS); 781 782 class JavaThread: public Thread { 783 friend class VMStructs; 784 friend class JVMCIVMStructs; 785 friend class WhiteBox; 786 private: 787 JavaThread* _next; // The next thread in the Threads list 788 oop _threadObj; // The Java level thread object 789 790 #ifdef ASSERT 791 private: 792 int _java_call_counter; 793 794 public: 795 int java_call_counter() { return _java_call_counter; } 796 void inc_java_call_counter() { _java_call_counter++; } 797 void dec_java_call_counter() { 798 assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper"); 799 _java_call_counter--; 800 } 801 private: // restore original namespace restriction 802 #endif // ifdef ASSERT 803 804 #ifndef PRODUCT 805 public: 806 enum { 807 jump_ring_buffer_size = 16 808 }; 809 private: // restore original namespace restriction 810 #endif 811 812 JavaFrameAnchor _anchor; // Encapsulation of current java frame and it state 813 814 ThreadFunction _entry_point; 815 816 JNIEnv _jni_environment; 817 818 // Deopt support 819 DeoptResourceMark* _deopt_mark; // Holds special ResourceMark for deoptimization 820 821 intptr_t* _must_deopt_id; // id of frame that needs to be deopted once we 822 // transition out of native 823 CompiledMethod* _deopt_nmethod; // CompiledMethod that is currently being deoptimized 824 vframeArray* _vframe_array_head; // Holds the heap of the active vframeArrays 825 vframeArray* _vframe_array_last; // Holds last vFrameArray we popped 826 // Because deoptimization is lazy we must save jvmti requests to set locals 827 // in compiled frames until we deoptimize and we have an interpreter frame. 828 // This holds the pointer to array (yeah like there might be more than one) of 829 // description of compiled vframes that have locals that need to be updated. 830 GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates; 831 832 // Handshake value for fixing 6243940. We need a place for the i2c 833 // adapter to store the callee Method*. This value is NEVER live 834 // across a gc point so it does NOT have to be gc'd 835 // The handshake is open ended since we can't be certain that it will 836 // be NULLed. This is because we rarely ever see the race and end up 837 // in handle_wrong_method which is the backend of the handshake. See 838 // code in i2c adapters and handle_wrong_method. 839 840 Method* _callee_target; 841 842 // Used to pass back results to the interpreter or generated code running Java code. 843 oop _vm_result; // oop result is GC-preserved 844 Metadata* _vm_result_2; // non-oop result 845 846 // See ReduceInitialCardMarks: this holds the precise space interval of 847 // the most recent slow path allocation for which compiled code has 848 // elided card-marks for performance along the fast-path. 849 MemRegion _deferred_card_mark; 850 851 MonitorChunk* _monitor_chunks; // Contains the off stack monitors 852 // allocated during deoptimization 853 // and by JNI_MonitorEnter/Exit 854 855 // Async. requests support 856 enum AsyncRequests { 857 _no_async_condition = 0, 858 _async_exception, 859 _async_unsafe_access_error 860 }; 861 AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request 862 oop _pending_async_exception; 863 864 // Safepoint support 865 public: // Expose _thread_state for SafeFetchInt() 866 volatile JavaThreadState _thread_state; 867 private: 868 ThreadSafepointState *_safepoint_state; // Holds information about a thread during a safepoint 869 address _saved_exception_pc; // Saved pc of instruction where last implicit exception happened 870 871 // JavaThread termination support 872 enum TerminatedTypes { 873 _not_terminated = 0xDEAD - 2, 874 _thread_exiting, // JavaThread::exit() has been called for this thread 875 _thread_terminated, // JavaThread is removed from thread list 876 _vm_exited // JavaThread is still executing native code, but VM is terminated 877 // only VM_Exit can set _vm_exited 878 }; 879 880 // In general a JavaThread's _terminated field transitions as follows: 881 // 882 // _not_terminated => _thread_exiting => _thread_terminated 883 // 884 // _vm_exited is a special value to cover the case of a JavaThread 885 // executing native code after the VM itself is terminated. 886 volatile TerminatedTypes _terminated; 887 // suspend/resume support 888 volatile bool _suspend_equivalent; // Suspend equivalent condition 889 jint _in_deopt_handler; // count of deoptimization 890 // handlers thread is in 891 volatile bool _doing_unsafe_access; // Thread may fault due to unsafe access 892 bool _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was 893 // never locked) when throwing an exception. Used by interpreter only. 894 895 // JNI attach states: 896 enum JNIAttachStates { 897 _not_attaching_via_jni = 1, // thread is not attaching via JNI 898 _attaching_via_jni, // thread is attaching via JNI 899 _attached_via_jni // thread has attached via JNI 900 }; 901 902 // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni. 903 // A native thread that is attaching via JNI starts with a value 904 // of _attaching_via_jni and transitions to _attached_via_jni. 905 volatile JNIAttachStates _jni_attach_state; 906 907 public: 908 // State of the stack guard pages for this thread. 909 enum StackGuardState { 910 stack_guard_unused, // not needed 911 stack_guard_reserved_disabled, 912 stack_guard_yellow_reserved_disabled,// disabled (temporarily) after stack overflow 913 stack_guard_enabled // enabled 914 }; 915 916 private: 917 918 #if INCLUDE_JVMCI 919 // The _pending_* fields below are used to communicate extra information 920 // from an uncommon trap in JVMCI compiled code to the uncommon trap handler. 921 922 // Communicates the DeoptReason and DeoptAction of the uncommon trap 923 int _pending_deoptimization; 924 925 // Specifies whether the uncommon trap is to bci 0 of a synchronized method 926 // before the monitor has been acquired. 927 bool _pending_monitorenter; 928 929 // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter 930 bool _pending_transfer_to_interpreter; 931 932 // Guard for re-entrant call to JVMCIRuntime::adjust_comp_level 933 bool _adjusting_comp_level; 934 935 // An object that JVMCI compiled code can use to further describe and 936 // uniquely identify the speculative optimization guarded by the uncommon trap 937 oop _pending_failed_speculation; 938 939 // These fields are mutually exclusive in terms of live ranges. 940 union { 941 // Communicates the pc at which the most recent implicit exception occurred 942 // from the signal handler to a deoptimization stub. 943 address _implicit_exception_pc; 944 945 // Communicates an alternative call target to an i2c stub from a JavaCall . 946 address _alternate_call_target; 947 } _jvmci; 948 949 // Support for high precision, thread sensitive counters in JVMCI compiled code. 950 jlong* _jvmci_counters; 951 952 public: 953 static jlong* _jvmci_old_thread_counters; 954 static void collect_counters(typeArrayOop array); 955 private: 956 #endif // INCLUDE_JVMCI 957 958 StackGuardState _stack_guard_state; 959 960 // Precompute the limit of the stack as used in stack overflow checks. 961 // We load it from here to simplify the stack overflow check in assembly. 962 address _stack_overflow_limit; 963 address _reserved_stack_activation; 964 965 // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is 966 // used to temp. parsing values into and out of the runtime system during exception handling for compiled 967 // code) 968 volatile oop _exception_oop; // Exception thrown in compiled code 969 volatile address _exception_pc; // PC where exception happened 970 volatile address _exception_handler_pc; // PC for handler of exception 971 volatile int _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site. 972 973 private: 974 // support for JNI critical regions 975 jint _jni_active_critical; // count of entries into JNI critical region 976 977 // Checked JNI: function name requires exception check 978 char* _pending_jni_exception_check_fn; 979 980 // For deadlock detection. 981 int _depth_first_number; 982 983 // JVMTI PopFrame support 984 // This is set to popframe_pending to signal that top Java frame should be popped immediately 985 int _popframe_condition; 986 987 // If reallocation of scalar replaced objects fails, we throw OOM 988 // and during exception propagation, pop the top 989 // _frames_to_pop_failed_realloc frames, the ones that reference 990 // failed reallocations. 991 int _frames_to_pop_failed_realloc; 992 993 #ifndef PRODUCT 994 int _jmp_ring_index; 995 struct { 996 // We use intptr_t instead of address so debugger doesn't try and display strings 997 intptr_t _target; 998 intptr_t _instruction; 999 const char* _file; 1000 int _line; 1001 } _jmp_ring[jump_ring_buffer_size]; 1002 #endif // PRODUCT 1003 1004 #if INCLUDE_ALL_GCS 1005 // Support for G1 barriers 1006 1007 SATBMarkQueue _satb_mark_queue; // Thread-local log for SATB barrier. 1008 // Set of all such queues. 1009 static SATBMarkQueueSet _satb_mark_queue_set; 1010 1011 DirtyCardQueue _dirty_card_queue; // Thread-local log for dirty cards. 1012 // Set of all such queues. 1013 static DirtyCardQueueSet _dirty_card_queue_set; 1014 1015 void flush_barrier_queues(); 1016 #endif // INCLUDE_ALL_GCS 1017 1018 friend class VMThread; 1019 friend class ThreadWaitTransition; 1020 friend class VM_Exit; 1021 1022 void initialize(); // Initialized the instance variables 1023 1024 public: 1025 // Constructor 1026 JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads 1027 JavaThread(ThreadFunction entry_point, size_t stack_size = 0); 1028 ~JavaThread(); 1029 1030 #ifdef ASSERT 1031 // verify this JavaThread hasn't be published in the Threads::list yet 1032 void verify_not_published(); 1033 #endif 1034 1035 //JNI functiontable getter/setter for JVMTI jni function table interception API. 1036 void set_jni_functions(struct JNINativeInterface_* functionTable) { 1037 _jni_environment.functions = functionTable; 1038 } 1039 struct JNINativeInterface_* get_jni_functions() { 1040 return (struct JNINativeInterface_ *)_jni_environment.functions; 1041 } 1042 1043 // This function is called at thread creation to allow 1044 // platform specific thread variables to be initialized. 1045 void cache_global_variables(); 1046 1047 // Executes Shutdown.shutdown() 1048 void invoke_shutdown_hooks(); 1049 1050 // Cleanup on thread exit 1051 enum ExitType { 1052 normal_exit, 1053 jni_detach 1054 }; 1055 void exit(bool destroy_vm, ExitType exit_type = normal_exit); 1056 1057 void cleanup_failed_attach_current_thread(); 1058 1059 // Testers 1060 virtual bool is_Java_thread() const { return true; } 1061 virtual bool can_call_java() const { return true; } 1062 1063 // Thread chain operations 1064 JavaThread* next() const { return _next; } 1065 void set_next(JavaThread* p) { _next = p; } 1066 1067 // Thread oop. threadObj() can be NULL for initial JavaThread 1068 // (or for threads attached via JNI) 1069 oop threadObj() const { return _threadObj; } 1070 void set_threadObj(oop p) { _threadObj = p; } 1071 1072 ThreadPriority java_priority() const; // Read from threadObj() 1073 1074 // Prepare thread and add to priority queue. If a priority is 1075 // not specified, use the priority of the thread object. Threads_lock 1076 // must be held while this function is called. 1077 void prepare(jobject jni_thread, ThreadPriority prio=NoPriority); 1078 void prepare_ext(); 1079 1080 void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; } 1081 address saved_exception_pc() { return _saved_exception_pc; } 1082 1083 1084 ThreadFunction entry_point() const { return _entry_point; } 1085 1086 // Allocates a new Java level thread object for this thread. thread_name may be NULL. 1087 void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS); 1088 1089 // Last frame anchor routines 1090 1091 JavaFrameAnchor* frame_anchor(void) { return &_anchor; } 1092 1093 // last_Java_sp 1094 bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); } 1095 intptr_t* last_Java_sp() const { return _anchor.last_Java_sp(); } 1096 1097 // last_Java_pc 1098 1099 address last_Java_pc(void) { return _anchor.last_Java_pc(); } 1100 1101 // Safepoint support 1102 #if !(defined(PPC64) || defined(AARCH64)) 1103 JavaThreadState thread_state() const { return _thread_state; } 1104 void set_thread_state(JavaThreadState s) { _thread_state = s; } 1105 #else 1106 // Use membars when accessing volatile _thread_state. See 1107 // Threads::create_vm() for size checks. 1108 inline JavaThreadState thread_state() const; 1109 inline void set_thread_state(JavaThreadState s); 1110 #endif 1111 ThreadSafepointState *safepoint_state() const { return _safepoint_state; } 1112 void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; } 1113 bool is_at_poll_safepoint() { return _safepoint_state->is_at_poll_safepoint(); } 1114 1115 // thread has called JavaThread::exit() or is terminated 1116 bool is_exiting() { return _terminated == _thread_exiting || is_terminated(); } 1117 // thread is terminated (no longer on the threads list); we compare 1118 // against the two non-terminated values so that a freed JavaThread 1119 // will also be considered terminated. 1120 bool is_terminated() { return _terminated != _not_terminated && _terminated != _thread_exiting; } 1121 void set_terminated(TerminatedTypes t) { _terminated = t; } 1122 // special for Threads::remove() which is static: 1123 void set_terminated_value() { _terminated = _thread_terminated; } 1124 void block_if_vm_exited(); 1125 1126 bool doing_unsafe_access() { return _doing_unsafe_access; } 1127 void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; } 1128 1129 bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; } 1130 void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; } 1131 1132 // Suspend/resume support for JavaThread 1133 private: 1134 inline void set_ext_suspended(); 1135 inline void clear_ext_suspended(); 1136 1137 public: 1138 void java_suspend(); 1139 void java_resume(); 1140 int java_suspend_self(); 1141 1142 void check_and_wait_while_suspended() { 1143 assert(JavaThread::current() == this, "sanity check"); 1144 1145 bool do_self_suspend; 1146 do { 1147 // were we externally suspended while we were waiting? 1148 do_self_suspend = handle_special_suspend_equivalent_condition(); 1149 if (do_self_suspend) { 1150 // don't surprise the thread that suspended us by returning 1151 java_suspend_self(); 1152 set_suspend_equivalent(); 1153 } 1154 } while (do_self_suspend); 1155 } 1156 static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread); 1157 // Check for async exception in addition to safepoint and suspend request. 1158 static void check_special_condition_for_native_trans(JavaThread *thread); 1159 1160 // Same as check_special_condition_for_native_trans but finishes the 1161 // transition into thread_in_Java mode so that it can potentially 1162 // block. 1163 static void check_special_condition_for_native_trans_and_transition(JavaThread *thread); 1164 1165 bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits); 1166 bool is_ext_suspend_completed_with_lock(uint32_t *bits) { 1167 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1168 // Warning: is_ext_suspend_completed() may temporarily drop the 1169 // SR_lock to allow the thread to reach a stable thread state if 1170 // it is currently in a transient thread state. 1171 return is_ext_suspend_completed(false /* !called_by_wait */, 1172 SuspendRetryDelay, bits); 1173 } 1174 1175 // We cannot allow wait_for_ext_suspend_completion() to run forever or 1176 // we could hang. SuspendRetryCount and SuspendRetryDelay are normally 1177 // passed as the count and delay parameters. Experiments with specific 1178 // calls to wait_for_ext_suspend_completion() can be done by passing 1179 // other values in the code. Experiments with all calls can be done 1180 // via the appropriate -XX options. 1181 bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits); 1182 1183 inline void set_external_suspend(); 1184 inline void clear_external_suspend(); 1185 1186 inline void set_deopt_suspend(); 1187 inline void clear_deopt_suspend(); 1188 bool is_deopt_suspend() { return (_suspend_flags & _deopt_suspend) != 0; } 1189 1190 bool is_external_suspend() const { 1191 return (_suspend_flags & _external_suspend) != 0; 1192 } 1193 // Whenever a thread transitions from native to vm/java it must suspend 1194 // if external|deopt suspend is present. 1195 bool is_suspend_after_native() const { 1196 return (_suspend_flags & (_external_suspend | _deopt_suspend)) != 0; 1197 } 1198 1199 // external suspend request is completed 1200 bool is_ext_suspended() const { 1201 return (_suspend_flags & _ext_suspended) != 0; 1202 } 1203 1204 bool is_external_suspend_with_lock() const { 1205 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1206 return is_external_suspend(); 1207 } 1208 1209 // Special method to handle a pending external suspend request 1210 // when a suspend equivalent condition lifts. 1211 bool handle_special_suspend_equivalent_condition() { 1212 assert(is_suspend_equivalent(), 1213 "should only be called in a suspend equivalence condition"); 1214 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1215 bool ret = is_external_suspend(); 1216 if (!ret) { 1217 // not about to self-suspend so clear suspend equivalence 1218 clear_suspend_equivalent(); 1219 } 1220 // implied else: 1221 // We have a pending external suspend request so we leave the 1222 // suspend_equivalent flag set until java_suspend_self() sets 1223 // the ext_suspended flag and clears the suspend_equivalent 1224 // flag. This insures that wait_for_ext_suspend_completion() 1225 // will return consistent values. 1226 return ret; 1227 } 1228 1229 // utility methods to see if we are doing some kind of suspension 1230 bool is_being_ext_suspended() const { 1231 MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag); 1232 return is_ext_suspended() || is_external_suspend(); 1233 } 1234 1235 bool is_suspend_equivalent() const { return _suspend_equivalent; } 1236 1237 void set_suspend_equivalent() { _suspend_equivalent = true; } 1238 void clear_suspend_equivalent() { _suspend_equivalent = false; } 1239 1240 // Thread.stop support 1241 void send_thread_stop(oop throwable); 1242 AsyncRequests clear_special_runtime_exit_condition() { 1243 AsyncRequests x = _special_runtime_exit_condition; 1244 _special_runtime_exit_condition = _no_async_condition; 1245 return x; 1246 } 1247 1248 // Are any async conditions present? 1249 bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); } 1250 1251 void check_and_handle_async_exceptions(bool check_unsafe_error = true); 1252 1253 // these next two are also used for self-suspension and async exception support 1254 void handle_special_runtime_exit_condition(bool check_asyncs = true); 1255 1256 // Return true if JavaThread has an asynchronous condition or 1257 // if external suspension is requested. 1258 bool has_special_runtime_exit_condition() { 1259 // Because we don't use is_external_suspend_with_lock 1260 // it is possible that we won't see an asynchronous external suspend 1261 // request that has just gotten started, i.e., SR_lock grabbed but 1262 // _external_suspend field change either not made yet or not visible 1263 // yet. However, this is okay because the request is asynchronous and 1264 // we will see the new flag value the next time through. It's also 1265 // possible that the external suspend request is dropped after 1266 // we have checked is_external_suspend(), we will recheck its value 1267 // under SR_lock in java_suspend_self(). 1268 return (_special_runtime_exit_condition != _no_async_condition) || 1269 is_external_suspend() || is_deopt_suspend() || is_trace_suspend(); 1270 } 1271 1272 void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; } 1273 1274 inline void set_pending_async_exception(oop e); 1275 1276 // Fast-locking support 1277 bool is_lock_owned(address adr) const; 1278 1279 // Accessors for vframe array top 1280 // The linked list of vframe arrays are sorted on sp. This means when we 1281 // unpack the head must contain the vframe array to unpack. 1282 void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; } 1283 vframeArray* vframe_array_head() const { return _vframe_array_head; } 1284 1285 // Side structure for deferring update of java frame locals until deopt occurs 1286 GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; } 1287 void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; } 1288 1289 // These only really exist to make debugging deopt problems simpler 1290 1291 void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; } 1292 vframeArray* vframe_array_last() const { return _vframe_array_last; } 1293 1294 // The special resourceMark used during deoptimization 1295 1296 void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; } 1297 DeoptResourceMark* deopt_mark(void) { return _deopt_mark; } 1298 1299 intptr_t* must_deopt_id() { return _must_deopt_id; } 1300 void set_must_deopt_id(intptr_t* id) { _must_deopt_id = id; } 1301 void clear_must_deopt_id() { _must_deopt_id = NULL; } 1302 1303 void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; } 1304 CompiledMethod* deopt_compiled_method() { return _deopt_nmethod; } 1305 1306 Method* callee_target() const { return _callee_target; } 1307 void set_callee_target (Method* x) { _callee_target = x; } 1308 1309 // Oop results of vm runtime calls 1310 oop vm_result() const { return _vm_result; } 1311 void set_vm_result (oop x) { _vm_result = x; } 1312 1313 Metadata* vm_result_2() const { return _vm_result_2; } 1314 void set_vm_result_2 (Metadata* x) { _vm_result_2 = x; } 1315 1316 MemRegion deferred_card_mark() const { return _deferred_card_mark; } 1317 void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; } 1318 1319 #if INCLUDE_JVMCI 1320 int pending_deoptimization() const { return _pending_deoptimization; } 1321 oop pending_failed_speculation() const { return _pending_failed_speculation; } 1322 bool adjusting_comp_level() const { return _adjusting_comp_level; } 1323 void set_adjusting_comp_level(bool b) { _adjusting_comp_level = b; } 1324 bool has_pending_monitorenter() const { return _pending_monitorenter; } 1325 void set_pending_monitorenter(bool b) { _pending_monitorenter = b; } 1326 void set_pending_deoptimization(int reason) { _pending_deoptimization = reason; } 1327 void set_pending_failed_speculation(oop failed_speculation) { _pending_failed_speculation = failed_speculation; } 1328 void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; } 1329 void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; } 1330 void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; } 1331 #endif // INCLUDE_JVMCI 1332 1333 // Exception handling for compiled methods 1334 oop exception_oop() const { return _exception_oop; } 1335 address exception_pc() const { return _exception_pc; } 1336 address exception_handler_pc() const { return _exception_handler_pc; } 1337 bool is_method_handle_return() const { return _is_method_handle_return == 1; } 1338 1339 void set_exception_oop(oop o) { (void)const_cast<oop&>(_exception_oop = o); } 1340 void set_exception_pc(address a) { _exception_pc = a; } 1341 void set_exception_handler_pc(address a) { _exception_handler_pc = a; } 1342 void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; } 1343 1344 void clear_exception_oop_and_pc() { 1345 set_exception_oop(NULL); 1346 set_exception_pc(NULL); 1347 } 1348 1349 // Stack overflow support 1350 // 1351 // (small addresses) 1352 // 1353 // -- <-- stack_end() --- 1354 // | | 1355 // | red pages | 1356 // | | 1357 // -- <-- stack_red_zone_base() | 1358 // | | 1359 // | guard 1360 // | yellow pages zone 1361 // | | 1362 // | | 1363 // -- <-- stack_yellow_zone_base() | 1364 // | | 1365 // | | 1366 // | reserved pages | 1367 // | | 1368 // -- <-- stack_reserved_zone_base() --- --- 1369 // /|\ shadow <-- stack_overflow_limit() (somewhere in here) 1370 // | zone 1371 // \|/ size 1372 // some untouched memory --- 1373 // 1374 // 1375 // -- 1376 // | 1377 // | shadow zone 1378 // | 1379 // -- 1380 // x frame n 1381 // -- 1382 // x frame n-1 1383 // x 1384 // -- 1385 // ... 1386 // 1387 // -- 1388 // x frame 0 1389 // -- <-- stack_base() 1390 // 1391 // (large addresses) 1392 // 1393 1394 private: 1395 // These values are derived from flags StackRedPages, StackYellowPages, 1396 // StackReservedPages and StackShadowPages. The zone size is determined 1397 // ergonomically if page_size > 4K. 1398 static size_t _stack_red_zone_size; 1399 static size_t _stack_yellow_zone_size; 1400 static size_t _stack_reserved_zone_size; 1401 static size_t _stack_shadow_zone_size; 1402 public: 1403 inline size_t stack_available(address cur_sp); 1404 1405 static size_t stack_red_zone_size() { 1406 assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized."); 1407 return _stack_red_zone_size; 1408 } 1409 static void set_stack_red_zone_size(size_t s) { 1410 assert(is_aligned(s, os::vm_page_size()), 1411 "We can not protect if the red zone size is not page aligned."); 1412 assert(_stack_red_zone_size == 0, "This should be called only once."); 1413 _stack_red_zone_size = s; 1414 } 1415 address stack_red_zone_base() { 1416 return (address)(stack_end() + stack_red_zone_size()); 1417 } 1418 bool in_stack_red_zone(address a) { 1419 return a <= stack_red_zone_base() && a >= stack_end(); 1420 } 1421 1422 static size_t stack_yellow_zone_size() { 1423 assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized."); 1424 return _stack_yellow_zone_size; 1425 } 1426 static void set_stack_yellow_zone_size(size_t s) { 1427 assert(is_aligned(s, os::vm_page_size()), 1428 "We can not protect if the yellow zone size is not page aligned."); 1429 assert(_stack_yellow_zone_size == 0, "This should be called only once."); 1430 _stack_yellow_zone_size = s; 1431 } 1432 1433 static size_t stack_reserved_zone_size() { 1434 // _stack_reserved_zone_size may be 0. This indicates the feature is off. 1435 return _stack_reserved_zone_size; 1436 } 1437 static void set_stack_reserved_zone_size(size_t s) { 1438 assert(is_aligned(s, os::vm_page_size()), 1439 "We can not protect if the reserved zone size is not page aligned."); 1440 assert(_stack_reserved_zone_size == 0, "This should be called only once."); 1441 _stack_reserved_zone_size = s; 1442 } 1443 address stack_reserved_zone_base() { 1444 return (address)(stack_end() + 1445 (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size())); 1446 } 1447 bool in_stack_reserved_zone(address a) { 1448 return (a <= stack_reserved_zone_base()) && 1449 (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size())); 1450 } 1451 1452 static size_t stack_yellow_reserved_zone_size() { 1453 return _stack_yellow_zone_size + _stack_reserved_zone_size; 1454 } 1455 bool in_stack_yellow_reserved_zone(address a) { 1456 return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base()); 1457 } 1458 1459 // Size of red + yellow + reserved zones. 1460 static size_t stack_guard_zone_size() { 1461 return stack_red_zone_size() + stack_yellow_reserved_zone_size(); 1462 } 1463 1464 static size_t stack_shadow_zone_size() { 1465 assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized."); 1466 return _stack_shadow_zone_size; 1467 } 1468 static void set_stack_shadow_zone_size(size_t s) { 1469 // The shadow area is not allocated or protected, so 1470 // it needs not be page aligned. 1471 // But the stack bang currently assumes that it is a 1472 // multiple of page size. This guarantees that the bang 1473 // loop touches all pages in the shadow zone. 1474 // This can be guaranteed differently, as well. E.g., if 1475 // the page size is a multiple of 4K, banging in 4K steps 1476 // suffices to touch all pages. (Some pages are banged 1477 // several times, though.) 1478 assert(is_aligned(s, os::vm_page_size()), 1479 "Stack bang assumes multiple of page size."); 1480 assert(_stack_shadow_zone_size == 0, "This should be called only once."); 1481 _stack_shadow_zone_size = s; 1482 } 1483 1484 void create_stack_guard_pages(); 1485 void remove_stack_guard_pages(); 1486 1487 void enable_stack_reserved_zone(); 1488 void disable_stack_reserved_zone(); 1489 void enable_stack_yellow_reserved_zone(); 1490 void disable_stack_yellow_reserved_zone(); 1491 void enable_stack_red_zone(); 1492 void disable_stack_red_zone(); 1493 1494 inline bool stack_guard_zone_unused(); 1495 inline bool stack_yellow_reserved_zone_disabled(); 1496 inline bool stack_reserved_zone_disabled(); 1497 inline bool stack_guards_enabled(); 1498 1499 address reserved_stack_activation() const { return _reserved_stack_activation; } 1500 void set_reserved_stack_activation(address addr) { 1501 assert(_reserved_stack_activation == stack_base() 1502 || _reserved_stack_activation == NULL 1503 || addr == stack_base(), "Must not be set twice"); 1504 _reserved_stack_activation = addr; 1505 } 1506 1507 // Attempt to reguard the stack after a stack overflow may have occurred. 1508 // Returns true if (a) guard pages are not needed on this thread, (b) the 1509 // pages are already guarded, or (c) the pages were successfully reguarded. 1510 // Returns false if there is not enough stack space to reguard the pages, in 1511 // which case the caller should unwind a frame and try again. The argument 1512 // should be the caller's (approximate) sp. 1513 bool reguard_stack(address cur_sp); 1514 // Similar to above but see if current stackpoint is out of the guard area 1515 // and reguard if possible. 1516 bool reguard_stack(void); 1517 1518 address stack_overflow_limit() { return _stack_overflow_limit; } 1519 void set_stack_overflow_limit() { 1520 _stack_overflow_limit = 1521 stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size()); 1522 } 1523 1524 // Misc. accessors/mutators 1525 void set_do_not_unlock(void) { _do_not_unlock_if_synchronized = true; } 1526 void clr_do_not_unlock(void) { _do_not_unlock_if_synchronized = false; } 1527 bool do_not_unlock(void) { return _do_not_unlock_if_synchronized; } 1528 1529 #ifndef PRODUCT 1530 void record_jump(address target, address instr, const char* file, int line); 1531 #endif // PRODUCT 1532 1533 // For assembly stub generation 1534 static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); } 1535 #ifndef PRODUCT 1536 static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index); } 1537 static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring); } 1538 #endif // PRODUCT 1539 static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); } 1540 static ByteSize pending_jni_exception_check_fn_offset() { 1541 return byte_offset_of(JavaThread, _pending_jni_exception_check_fn); 1542 } 1543 static ByteSize last_Java_sp_offset() { 1544 return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset(); 1545 } 1546 static ByteSize last_Java_pc_offset() { 1547 return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset(); 1548 } 1549 static ByteSize frame_anchor_offset() { 1550 return byte_offset_of(JavaThread, _anchor); 1551 } 1552 static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target); } 1553 static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result); } 1554 static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); } 1555 static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); } 1556 static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); } 1557 static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); } 1558 #if INCLUDE_JVMCI 1559 static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); } 1560 static ByteSize pending_monitorenter_offset() { return byte_offset_of(JavaThread, _pending_monitorenter); } 1561 static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); } 1562 static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); } 1563 static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); } 1564 static ByteSize jvmci_counters_offset() { return byte_offset_of(JavaThread, _jvmci_counters); } 1565 #endif // INCLUDE_JVMCI 1566 static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); } 1567 static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); } 1568 static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); } 1569 static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); } 1570 static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); } 1571 static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); } 1572 static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); } 1573 static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); } 1574 1575 static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } 1576 static ByteSize should_post_on_exceptions_flag_offset() { 1577 return byte_offset_of(JavaThread, _should_post_on_exceptions_flag); 1578 } 1579 1580 #if INCLUDE_ALL_GCS 1581 static ByteSize satb_mark_queue_offset() { return byte_offset_of(JavaThread, _satb_mark_queue); } 1582 static ByteSize dirty_card_queue_offset() { return byte_offset_of(JavaThread, _dirty_card_queue); } 1583 #endif // INCLUDE_ALL_GCS 1584 1585 // Returns the jni environment for this thread 1586 JNIEnv* jni_environment() { return &_jni_environment; } 1587 1588 static JavaThread* thread_from_jni_environment(JNIEnv* env) { 1589 JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset())); 1590 // Only return NULL if thread is off the thread list; starting to 1591 // exit should not return NULL. 1592 if (thread_from_jni_env->is_terminated()) { 1593 thread_from_jni_env->block_if_vm_exited(); 1594 return NULL; 1595 } else { 1596 return thread_from_jni_env; 1597 } 1598 } 1599 1600 // JNI critical regions. These can nest. 1601 bool in_critical() { return _jni_active_critical > 0; } 1602 bool in_last_critical() { return _jni_active_critical == 1; } 1603 void enter_critical() { 1604 assert(Thread::current() == this || 1605 (Thread::current()->is_VM_thread() && 1606 SafepointSynchronize::is_synchronizing()), 1607 "this must be current thread or synchronizing"); 1608 _jni_active_critical++; 1609 } 1610 void exit_critical() { 1611 assert(Thread::current() == this, "this must be current thread"); 1612 _jni_active_critical--; 1613 assert(_jni_active_critical >= 0, "JNI critical nesting problem?"); 1614 } 1615 1616 // Checked JNI: is the programmer required to check for exceptions, if so specify 1617 // which function name. Returning to a Java frame should implicitly clear the 1618 // pending check, this is done for Native->Java transitions (i.e. user JNI code). 1619 // VM->Java transistions are not cleared, it is expected that JNI code enclosed 1620 // within ThreadToNativeFromVM makes proper exception checks (i.e. VM internal). 1621 bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; } 1622 void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; } 1623 const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; } 1624 void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; } 1625 1626 // For deadlock detection 1627 int depth_first_number() { return _depth_first_number; } 1628 void set_depth_first_number(int dfn) { _depth_first_number = dfn; } 1629 1630 private: 1631 void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; } 1632 1633 public: 1634 MonitorChunk* monitor_chunks() const { return _monitor_chunks; } 1635 void add_monitor_chunk(MonitorChunk* chunk); 1636 void remove_monitor_chunk(MonitorChunk* chunk); 1637 bool in_deopt_handler() const { return _in_deopt_handler > 0; } 1638 void inc_in_deopt_handler() { _in_deopt_handler++; } 1639 void dec_in_deopt_handler() { 1640 assert(_in_deopt_handler > 0, "mismatched deopt nesting"); 1641 if (_in_deopt_handler > 0) { // robustness 1642 _in_deopt_handler--; 1643 } 1644 } 1645 1646 private: 1647 void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; } 1648 1649 public: 1650 1651 // Frame iteration; calls the function f for all frames on the stack 1652 void frames_do(void f(frame*, const RegisterMap*)); 1653 1654 // Memory operations 1655 void oops_do(OopClosure* f, CodeBlobClosure* cf); 1656 1657 // Sweeper operations 1658 virtual void nmethods_do(CodeBlobClosure* cf); 1659 1660 // RedefineClasses Support 1661 void metadata_do(void f(Metadata*)); 1662 1663 // Misc. operations 1664 char* name() const { return (char*)get_thread_name(); } 1665 void print_on(outputStream* st) const; 1666 void print_value(); 1667 void print_thread_state_on(outputStream*) const PRODUCT_RETURN; 1668 void print_thread_state() const PRODUCT_RETURN; 1669 void print_on_error(outputStream* st, char* buf, int buflen) const; 1670 void print_name_on_error(outputStream* st, char* buf, int buflen) const; 1671 void verify(); 1672 const char* get_thread_name() const; 1673 private: 1674 // factor out low-level mechanics for use in both normal and error cases 1675 const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const; 1676 public: 1677 const char* get_threadgroup_name() const; 1678 const char* get_parent_name() const; 1679 1680 // Accessing frames 1681 frame last_frame() { 1682 _anchor.make_walkable(this); 1683 return pd_last_frame(); 1684 } 1685 javaVFrame* last_java_vframe(RegisterMap* reg_map); 1686 1687 // Returns method at 'depth' java or native frames down the stack 1688 // Used for security checks 1689 Klass* security_get_caller_class(int depth); 1690 1691 // Print stack trace in external format 1692 void print_stack_on(outputStream* st); 1693 void print_stack() { print_stack_on(tty); } 1694 1695 // Print stack traces in various internal formats 1696 void trace_stack() PRODUCT_RETURN; 1697 void trace_stack_from(vframe* start_vf) PRODUCT_RETURN; 1698 void trace_frames() PRODUCT_RETURN; 1699 void trace_oops() PRODUCT_RETURN; 1700 1701 // Print an annotated view of the stack frames 1702 void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN; 1703 void validate_frame_layout() { 1704 print_frame_layout(0, true); 1705 } 1706 1707 // Returns the number of stack frames on the stack 1708 int depth() const; 1709 1710 // Function for testing deoptimization 1711 void deoptimize(); 1712 void make_zombies(); 1713 1714 void deoptimized_wrt_marked_nmethods(); 1715 1716 // Profiling operation (see fprofile.cpp) 1717 public: 1718 bool profile_last_Java_frame(frame* fr); 1719 1720 private: 1721 ThreadProfiler* _thread_profiler; 1722 private: 1723 friend class FlatProfiler; // uses both [gs]et_thread_profiler. 1724 friend class FlatProfilerTask; // uses get_thread_profiler. 1725 friend class ThreadProfilerMark; // uses get_thread_profiler. 1726 ThreadProfiler* get_thread_profiler() { return _thread_profiler; } 1727 ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) { 1728 ThreadProfiler* result = _thread_profiler; 1729 _thread_profiler = tp; 1730 return result; 1731 } 1732 1733 public: 1734 // Returns the running thread as a JavaThread 1735 static inline JavaThread* current(); 1736 1737 // Returns the active Java thread. Do not use this if you know you are calling 1738 // from a JavaThread, as it's slower than JavaThread::current. If called from 1739 // the VMThread, it also returns the JavaThread that instigated the VMThread's 1740 // operation. You may not want that either. 1741 static JavaThread* active(); 1742 1743 inline CompilerThread* as_CompilerThread(); 1744 1745 public: 1746 virtual void run(); 1747 void thread_main_inner(); 1748 1749 private: 1750 // PRIVILEGED STACK 1751 PrivilegedElement* _privileged_stack_top; 1752 GrowableArray<oop>* _array_for_gc; 1753 public: 1754 1755 // Returns the privileged_stack information. 1756 PrivilegedElement* privileged_stack_top() const { return _privileged_stack_top; } 1757 void set_privileged_stack_top(PrivilegedElement *e) { _privileged_stack_top = e; } 1758 void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; } 1759 1760 public: 1761 // Thread local information maintained by JVMTI. 1762 void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; } 1763 // A JvmtiThreadState is lazily allocated. This jvmti_thread_state() 1764 // getter is used to get this JavaThread's JvmtiThreadState if it has 1765 // one which means NULL can be returned. JvmtiThreadState::state_for() 1766 // is used to get the specified JavaThread's JvmtiThreadState if it has 1767 // one or it allocates a new JvmtiThreadState for the JavaThread and 1768 // returns it. JvmtiThreadState::state_for() will return NULL only if 1769 // the specified JavaThread is exiting. 1770 JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; } 1771 static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); } 1772 void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; } 1773 JvmtiGetLoadedClassesClosure* get_jvmti_get_loaded_classes_closure() const { return _jvmti_get_loaded_classes_closure; } 1774 1775 // JVMTI PopFrame support 1776 // Setting and clearing popframe_condition 1777 // All of these enumerated values are bits. popframe_pending 1778 // indicates that a PopFrame() has been requested and not yet been 1779 // completed. popframe_processing indicates that that PopFrame() is in 1780 // the process of being completed. popframe_force_deopt_reexecution_bit 1781 // indicates that special handling is required when returning to a 1782 // deoptimized caller. 1783 enum PopCondition { 1784 popframe_inactive = 0x00, 1785 popframe_pending_bit = 0x01, 1786 popframe_processing_bit = 0x02, 1787 popframe_force_deopt_reexecution_bit = 0x04 1788 }; 1789 PopCondition popframe_condition() { return (PopCondition) _popframe_condition; } 1790 void set_popframe_condition(PopCondition c) { _popframe_condition = c; } 1791 void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; } 1792 void clear_popframe_condition() { _popframe_condition = popframe_inactive; } 1793 static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); } 1794 bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; } 1795 bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; } 1796 void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; } 1797 #ifdef CC_INTERP 1798 bool pop_frame_pending(void) { return ((_popframe_condition & popframe_pending_bit) != 0); } 1799 void clr_pop_frame_pending(void) { _popframe_condition = popframe_inactive; } 1800 bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); } 1801 void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; } 1802 void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } 1803 #endif 1804 1805 int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; } 1806 void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; } 1807 void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; } 1808 1809 private: 1810 // Saved incoming arguments to popped frame. 1811 // Used only when popped interpreted frame returns to deoptimized frame. 1812 void* _popframe_preserved_args; 1813 int _popframe_preserved_args_size; 1814 1815 public: 1816 void popframe_preserve_args(ByteSize size_in_bytes, void* start); 1817 void* popframe_preserved_args(); 1818 ByteSize popframe_preserved_args_size(); 1819 WordSize popframe_preserved_args_size_in_words(); 1820 void popframe_free_preserved_args(); 1821 1822 1823 private: 1824 JvmtiThreadState *_jvmti_thread_state; 1825 JvmtiGetLoadedClassesClosure* _jvmti_get_loaded_classes_closure; 1826 1827 // Used by the interpreter in fullspeed mode for frame pop, method 1828 // entry, method exit and single stepping support. This field is 1829 // only set to non-zero by the VM_EnterInterpOnlyMode VM operation. 1830 // It can be set to zero asynchronously (i.e., without a VM operation 1831 // or a lock) so we have to be very careful. 1832 int _interp_only_mode; 1833 1834 public: 1835 // used by the interpreter for fullspeed debugging support (see above) 1836 static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); } 1837 bool is_interp_only_mode() { return (_interp_only_mode != 0); } 1838 int get_interp_only_mode() { return _interp_only_mode; } 1839 void increment_interp_only_mode() { ++_interp_only_mode; } 1840 void decrement_interp_only_mode() { --_interp_only_mode; } 1841 1842 // support for cached flag that indicates whether exceptions need to be posted for this thread 1843 // if this is false, we can avoid deoptimizing when events are thrown 1844 // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything 1845 private: 1846 int _should_post_on_exceptions_flag; 1847 1848 public: 1849 int should_post_on_exceptions_flag() { return _should_post_on_exceptions_flag; } 1850 void set_should_post_on_exceptions_flag(int val) { _should_post_on_exceptions_flag = val; } 1851 1852 private: 1853 ThreadStatistics *_thread_stat; 1854 1855 public: 1856 ThreadStatistics* get_thread_stat() const { return _thread_stat; } 1857 1858 // Return a blocker object for which this thread is blocked parking. 1859 oop current_park_blocker(); 1860 1861 private: 1862 static size_t _stack_size_at_create; 1863 1864 public: 1865 static inline size_t stack_size_at_create(void) { 1866 return _stack_size_at_create; 1867 } 1868 static inline void set_stack_size_at_create(size_t value) { 1869 _stack_size_at_create = value; 1870 } 1871 1872 #if INCLUDE_ALL_GCS 1873 // SATB marking queue support 1874 SATBMarkQueue& satb_mark_queue() { return _satb_mark_queue; } 1875 static SATBMarkQueueSet& satb_mark_queue_set() { 1876 return _satb_mark_queue_set; 1877 } 1878 1879 // Dirty card queue support 1880 DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; } 1881 static DirtyCardQueueSet& dirty_card_queue_set() { 1882 return _dirty_card_queue_set; 1883 } 1884 #endif // INCLUDE_ALL_GCS 1885 1886 // This method initializes the SATB and dirty card queues before a 1887 // JavaThread is added to the Java thread list. Right now, we don't 1888 // have to do anything to the dirty card queue (it should have been 1889 // activated when the thread was created), but we have to activate 1890 // the SATB queue if the thread is created while a marking cycle is 1891 // in progress. The activation / de-activation of the SATB queues at 1892 // the beginning / end of a marking cycle is done during safepoints 1893 // so we have to make sure this method is called outside one to be 1894 // able to safely read the active field of the SATB queue set. Right 1895 // now, it is called just before the thread is added to the Java 1896 // thread list in the Threads::add() method. That method is holding 1897 // the Threads_lock which ensures we are outside a safepoint. We 1898 // cannot do the obvious and set the active field of the SATB queue 1899 // when the thread is created given that, in some cases, safepoints 1900 // might happen between the JavaThread constructor being called and the 1901 // thread being added to the Java thread list (an example of this is 1902 // when the structure for the DestroyJavaVM thread is created). 1903 #if INCLUDE_ALL_GCS 1904 void initialize_queues(); 1905 #else // INCLUDE_ALL_GCS 1906 void initialize_queues() { } 1907 #endif // INCLUDE_ALL_GCS 1908 1909 // Machine dependent stuff 1910 #include OS_CPU_HEADER(thread) 1911 1912 public: 1913 void set_blocked_on_compilation(bool value) { 1914 _blocked_on_compilation = value; 1915 } 1916 1917 bool blocked_on_compilation() { 1918 return _blocked_on_compilation; 1919 } 1920 protected: 1921 bool _blocked_on_compilation; 1922 1923 1924 // JSR166 per-thread parker 1925 private: 1926 Parker* _parker; 1927 public: 1928 Parker* parker() { return _parker; } 1929 1930 // Biased locking support 1931 private: 1932 GrowableArray<MonitorInfo*>* _cached_monitor_info; 1933 public: 1934 GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; } 1935 void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; } 1936 1937 // clearing/querying jni attach status 1938 bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; } 1939 bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; } 1940 inline void set_done_attaching_via_jni(); 1941 }; 1942 1943 // Inline implementation of JavaThread::current 1944 inline JavaThread* JavaThread::current() { 1945 Thread* thread = Thread::current(); 1946 assert(thread->is_Java_thread(), "just checking"); 1947 return (JavaThread*)thread; 1948 } 1949 1950 inline CompilerThread* JavaThread::as_CompilerThread() { 1951 assert(is_Compiler_thread(), "just checking"); 1952 return (CompilerThread*)this; 1953 } 1954 1955 // Dedicated thread to sweep the code cache 1956 class CodeCacheSweeperThread : public JavaThread { 1957 CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper 1958 public: 1959 CodeCacheSweeperThread(); 1960 // Track the nmethod currently being scanned by the sweeper 1961 void set_scanned_compiled_method(CompiledMethod* cm) { 1962 assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value"); 1963 _scanned_compiled_method = cm; 1964 } 1965 1966 // Hide sweeper thread from external view. 1967 bool is_hidden_from_external_view() const { return true; } 1968 1969 bool is_Code_cache_sweeper_thread() const { return true; } 1970 1971 // Prevent GC from unloading _scanned_compiled_method 1972 void oops_do(OopClosure* f, CodeBlobClosure* cf); 1973 void nmethods_do(CodeBlobClosure* cf); 1974 }; 1975 1976 // A thread used for Compilation. 1977 class CompilerThread : public JavaThread { 1978 friend class VMStructs; 1979 private: 1980 CompilerCounters* _counters; 1981 1982 ciEnv* _env; 1983 CompileLog* _log; 1984 CompileTask* _task; 1985 CompileQueue* _queue; 1986 BufferBlob* _buffer_blob; 1987 1988 AbstractCompiler* _compiler; 1989 1990 public: 1991 1992 static CompilerThread* current(); 1993 1994 CompilerThread(CompileQueue* queue, CompilerCounters* counters); 1995 1996 bool is_Compiler_thread() const { return true; } 1997 1998 virtual bool can_call_java() const; 1999 2000 // Hide native compiler threads from external view. 2001 bool is_hidden_from_external_view() const { return !can_call_java(); } 2002 2003 void set_compiler(AbstractCompiler* c) { _compiler = c; } 2004 AbstractCompiler* compiler() const { return _compiler; } 2005 2006 CompileQueue* queue() const { return _queue; } 2007 CompilerCounters* counters() const { return _counters; } 2008 2009 // Get/set the thread's compilation environment. 2010 ciEnv* env() { return _env; } 2011 void set_env(ciEnv* env) { _env = env; } 2012 2013 BufferBlob* get_buffer_blob() const { return _buffer_blob; } 2014 void set_buffer_blob(BufferBlob* b) { _buffer_blob = b; } 2015 2016 // Get/set the thread's logging information 2017 CompileLog* log() { return _log; } 2018 void init_log(CompileLog* log) { 2019 // Set once, for good. 2020 assert(_log == NULL, "set only once"); 2021 _log = log; 2022 } 2023 2024 #ifndef PRODUCT 2025 private: 2026 IdealGraphPrinter *_ideal_graph_printer; 2027 public: 2028 IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; } 2029 void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; } 2030 #endif 2031 2032 // Get/set the thread's current task 2033 CompileTask* task() { return _task; } 2034 void set_task(CompileTask* task) { _task = task; } 2035 }; 2036 2037 inline CompilerThread* CompilerThread::current() { 2038 return JavaThread::current()->as_CompilerThread(); 2039 } 2040 2041 // The active thread queue. It also keeps track of the current used 2042 // thread priorities. 2043 class Threads: AllStatic { 2044 friend class VMStructs; 2045 private: 2046 static JavaThread* _thread_list; 2047 static int _number_of_threads; 2048 static int _number_of_non_daemon_threads; 2049 static int _return_code; 2050 static int _thread_claim_parity; 2051 #ifdef ASSERT 2052 static bool _vm_complete; 2053 #endif 2054 2055 static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS); 2056 static void initialize_jsr292_core_classes(TRAPS); 2057 public: 2058 // Thread management 2059 // force_daemon is a concession to JNI, where we may need to add a 2060 // thread to the thread list before allocating its thread object 2061 static void add(JavaThread* p, bool force_daemon = false); 2062 static void remove(JavaThread* p); 2063 static bool includes(JavaThread* p); 2064 static JavaThread* first() { return _thread_list; } 2065 static void threads_do(ThreadClosure* tc); 2066 static void parallel_java_threads_do(ThreadClosure* tc); 2067 2068 // Initializes the vm and creates the vm thread 2069 static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain); 2070 static void convert_vm_init_libraries_to_agents(); 2071 static void create_vm_init_libraries(); 2072 static void create_vm_init_agents(); 2073 static void shutdown_vm_agents(); 2074 static bool destroy_vm(); 2075 // Supported VM versions via JNI 2076 // Includes JNI_VERSION_1_1 2077 static jboolean is_supported_jni_version_including_1_1(jint version); 2078 // Does not include JNI_VERSION_1_1 2079 static jboolean is_supported_jni_version(jint version); 2080 2081 // The "thread claim parity" provides a way for threads to be claimed 2082 // by parallel worker tasks. 2083 // 2084 // Each thread contains a a "parity" field. A task will claim the 2085 // thread only if its parity field is the same as the global parity, 2086 // which is updated by calling change_thread_claim_parity(). 2087 // 2088 // For this to work change_thread_claim_parity() needs to be called 2089 // exactly once in sequential code before starting parallel tasks 2090 // that should claim threads. 2091 // 2092 // New threads get their parity set to 0 and change_thread_claim_parity() 2093 // never set the global parity to 0. 2094 static int thread_claim_parity() { return _thread_claim_parity; } 2095 static void change_thread_claim_parity(); 2096 static void assert_all_threads_claimed() NOT_DEBUG_RETURN; 2097 2098 // Apply "f->do_oop" to all root oops in all threads. 2099 // This version may only be called by sequential code. 2100 static void oops_do(OopClosure* f, CodeBlobClosure* cf); 2101 // This version may be called by sequential or parallel code. 2102 static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf); 2103 // This creates a list of GCTasks, one per thread. 2104 static void create_thread_roots_tasks(GCTaskQueue* q); 2105 // This creates a list of GCTasks, one per thread, for marking objects. 2106 static void create_thread_roots_marking_tasks(GCTaskQueue* q); 2107 2108 // Apply "f->do_oop" to roots in all threads that 2109 // are part of compiled frames 2110 static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf); 2111 2112 static void convert_hcode_pointers(); 2113 static void restore_hcode_pointers(); 2114 2115 // Sweeper 2116 static void nmethods_do(CodeBlobClosure* cf); 2117 2118 // RedefineClasses support 2119 static void metadata_do(void f(Metadata*)); 2120 static void metadata_handles_do(void f(Metadata*)); 2121 2122 #ifdef ASSERT 2123 static bool is_vm_complete() { return _vm_complete; } 2124 #endif 2125 2126 // Verification 2127 static void verify(); 2128 static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks); 2129 static void print(bool print_stacks, bool internal_format) { 2130 // this function is only used by debug.cpp 2131 print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */); 2132 } 2133 static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen); 2134 static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf, 2135 int buflen, bool* found_current); 2136 static void print_threads_compiling(outputStream* st, char* buf, int buflen); 2137 2138 // Get Java threads that are waiting to enter a monitor. If doLock 2139 // is true, then Threads_lock is grabbed as needed. Otherwise, the 2140 // VM needs to be at a safepoint. 2141 static GrowableArray<JavaThread*>* get_pending_threads(int count, 2142 address monitor, bool doLock); 2143 2144 // Get owning Java thread from the monitor's owner field. If doLock 2145 // is true, then Threads_lock is grabbed as needed. Otherwise, the 2146 // VM needs to be at a safepoint. 2147 static JavaThread *owning_thread_from_monitor_owner(address owner, 2148 bool doLock); 2149 2150 // Number of threads on the active threads list 2151 static int number_of_threads() { return _number_of_threads; } 2152 // Number of non-daemon threads on the active threads list 2153 static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; } 2154 2155 // Deoptimizes all frames tied to marked nmethods 2156 static void deoptimized_wrt_marked_nmethods(); 2157 2158 static JavaThread* find_java_thread_from_java_tid(jlong java_tid); 2159 2160 }; 2161 2162 2163 // Thread iterator 2164 class ThreadClosure: public StackObj { 2165 public: 2166 virtual void do_thread(Thread* thread) = 0; 2167 }; 2168 2169 class SignalHandlerMark: public StackObj { 2170 private: 2171 Thread* _thread; 2172 public: 2173 SignalHandlerMark(Thread* t) { 2174 _thread = t; 2175 if (_thread) _thread->enter_signal_handler(); 2176 } 2177 ~SignalHandlerMark() { 2178 if (_thread) _thread->leave_signal_handler(); 2179 _thread = NULL; 2180 } 2181 }; 2182 2183 2184 #endif // SHARE_VM_RUNTIME_THREAD_HPP