1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_THREAD_HPP
  26 #define SHARE_VM_RUNTIME_THREAD_HPP
  27 
  28 #include "jni.h"
  29 #include "gc/shared/gcThreadLocalData.hpp"
  30 #include "gc/shared/threadLocalAllocBuffer.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "oops/oop.hpp"
  33 #include "prims/jvmtiExport.hpp"
  34 #include "runtime/frame.hpp"
  35 #include "runtime/handshake.hpp"
  36 #include "runtime/javaFrameAnchor.hpp"
  37 #include "runtime/jniHandles.hpp"
  38 #include "runtime/mutexLocker.hpp"
  39 #include "runtime/os.hpp"
  40 #include "runtime/osThread.hpp"
  41 #include "runtime/park.hpp"
  42 #include "runtime/safepoint.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/threadLocalStorage.hpp"
  45 #include "runtime/unhandledOops.hpp"
  46 #include "trace/traceBackend.hpp"
  47 #include "trace/traceMacros.hpp"
  48 #include "utilities/align.hpp"
  49 #include "utilities/exceptions.hpp"
  50 #include "utilities/macros.hpp"
  51 #ifdef ZERO
  52 # include "stack_zero.hpp"
  53 #endif
  54 #include "runtime/threadStatisticInfo.hpp"
  55 
  56 class ThreadSafepointState;
  57 class ThreadsList;
  58 class ThreadsSMRSupport;
  59 class NestedThreadsList;
  60 
  61 class JvmtiThreadState;
  62 class JvmtiGetLoadedClassesClosure;
  63 class ThreadStatistics;
  64 class ConcurrentLocksDump;
  65 class ParkEvent;
  66 class Parker;
  67 
  68 class ciEnv;
  69 class CompileThread;
  70 class CompileLog;
  71 class CompileTask;
  72 class CompileQueue;
  73 class CompilerCounters;
  74 class vframeArray;
  75 
  76 class DeoptResourceMark;
  77 class jvmtiDeferredLocalVariableSet;
  78 
  79 class GCTaskQueue;
  80 class ThreadClosure;
  81 class IdealGraphPrinter;
  82 
  83 class Metadata;
  84 template <class T, MEMFLAGS F> class ChunkedList;
  85 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
  86 
  87 DEBUG_ONLY(class ResourceMark;)
  88 
  89 class WorkerThread;
  90 
  91 // Class hierarchy
  92 // - Thread
  93 //   - NamedThread
  94 //     - VMThread
  95 //     - ConcurrentGCThread
  96 //     - WorkerThread
  97 //       - GangWorker
  98 //       - GCTaskThread
  99 //   - JavaThread
 100 //     - various subclasses, e.g. CompilerThread, ServiceThread
 101 //   - WatcherThread
 102 
 103 class Thread: public ThreadShadow {
 104   friend class VMStructs;
 105   friend class JVMCIVMStructs;
 106  private:
 107 
 108 #ifndef USE_LIBRARY_BASED_TLS_ONLY
 109   // Current thread is maintained as a thread-local variable
 110   static THREAD_LOCAL_DECL Thread* _thr_current;
 111 #endif
 112 
 113  private:
 114   // Thread local data area available to the GC. The internal
 115   // structure and contents of this data area are GC-specific.
 116   // Only GC and GC barrier code should access this data area.
 117   GCThreadLocalData _gc_data;
 118 
 119  public:
 120   static ByteSize gc_data_offset() {
 121     return byte_offset_of(Thread, _gc_data);
 122   }
 123 
 124   template <typename T> T* gc_data() {
 125     STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
 126     return reinterpret_cast<T*>(&_gc_data);
 127   }
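       // Illustrative sketch only (MyGCThreadData is a hypothetical type): a
       // collector overlays its per-thread state on _gc_data, e.g.
       //
       //   struct MyGCThreadData { HeapWord* tlab_top; size_t marked_bytes; };
       //   MyGCThreadData* data = thread->gc_data<MyGCThreadData>();
       //   data->marked_bytes = 0;
       //
       // The STATIC_ASSERT in gc_data() guarantees the overlay fits the reserved area.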
 128 
 129   // Exception handling
 130   // (Note: _pending_exception and friends are in ThreadShadow)
 131   //oop       _pending_exception;                // pending exception for current thread
 132   // const char* _exception_file;                   // file information for exception (debugging only)
 133   // int         _exception_line;                   // line information for exception (debugging only)
 134  protected:
 135   // Support for forcing alignment of thread objects for biased locking
 136   void*       _real_malloc_address;
 137 
 138   // JavaThread lifecycle support:
 139   friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
 140   friend class ScanHazardPtrGatherThreadsListClosure;  // for get_nested_threads_hazard_ptr(), get_threads_hazard_ptr(), untag_hazard_ptr() access
 141   friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
 142   friend class ThreadsListSetter;  // for get_threads_hazard_ptr() access
 143   friend class ThreadsSMRSupport;  // for get_threads_hazard_ptr() access
 144 
 145   ThreadsList* volatile _threads_hazard_ptr;
 146   ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
 147   ThreadsList*          get_threads_hazard_ptr();
 148   void                  set_threads_hazard_ptr(ThreadsList* new_list);
 149   static bool           is_hazard_ptr_tagged(ThreadsList* list) {
 150     return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
 151   }
 152   static ThreadsList*   tag_hazard_ptr(ThreadsList* list) {
 153     return (ThreadsList*)(intptr_t(list) | intptr_t(1));
 154   }
 155   static ThreadsList*   untag_hazard_ptr(ThreadsList* list) {
 156     return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
 157   }
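       // The low bit of a published hazard ptr is used as a tag. Reader-side
       // handling, as a sketch (the real protocol lives in ThreadsSMRSupport):
       //
       //   ThreadsList* list = get_threads_hazard_ptr();   // may carry the tag bit
       //   if (is_hazard_ptr_tagged(list)) {
       //     list = untag_hazard_ptr(list);                // clear bit 0 before use
       //   }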
 158   NestedThreadsList* _nested_threads_hazard_ptr;
 159   NestedThreadsList* get_nested_threads_hazard_ptr() {
 160     return _nested_threads_hazard_ptr;
 161   }
 162   void set_nested_threads_hazard_ptr(NestedThreadsList* value) {
 163     assert(Threads_lock->owned_by_self(),
 164            "must own Threads_lock for _nested_threads_hazard_ptr to be valid.");
 165     _nested_threads_hazard_ptr = value;
 166   }
 167   // This field is enabled via -XX:+EnableThreadSMRStatistics:
 168   uint _nested_threads_hazard_ptr_cnt;
 169   void dec_nested_threads_hazard_ptr_cnt() {
 170     assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
 171     _nested_threads_hazard_ptr_cnt--;
 172   }
 173   void inc_nested_threads_hazard_ptr_cnt() {
 174     _nested_threads_hazard_ptr_cnt++;
 175   }
 176   uint nested_threads_hazard_ptr_cnt() {
 177     return _nested_threads_hazard_ptr_cnt;
 178   }
 179 
 180  public:
 181   void* operator new(size_t size) throw() { return allocate(size, true); }
 182   void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
 183     return allocate(size, false); }
 184   void  operator delete(void* p);
 185 
 186  protected:
 187   static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 188  private:
 189 
 190   // ***************************************************************
 191   // Suspend and resume support
 192   // ***************************************************************
 193   //
 194   // VM suspend/resume no longer exists - it was once used for various
 195   // things including safepoints but was deprecated and finally removed
 196   // in Java 7. Because VM suspension was considered "internal",
 197   // Java-level suspension was considered "external", and this legacy
 198   // naming scheme remains.
 199   //
 200   // External suspend/resume requests come from JVM_SuspendThread,
 201   // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
 202   // ResumeThread. External suspend requests cause _external_suspend to
 203   // be set and external resume requests cause _external_suspend to be
 204   // cleared.
 205   // External suspend requests do not nest on top of other external
 206   // suspend requests. The higher level APIs reject suspend requests
 207   // for already suspended threads.
 208   //
 209   // The external_suspend flag is checked by
 210   // has_special_runtime_exit_condition() and the JavaThread will
 211   // self-suspend when handle_special_runtime_exit_condition() is
 212   // called. Most uses of the _thread_blocked state in JavaThreads are
 213   // considered the same as being externally suspended; if the blocking
 214   // condition lifts, the JavaThread will self-suspend. Other places
 215   // where the VM checks for external_suspend include:
 216   //   + mutex granting (do not enter monitors when thread is suspended)
 217   //   + state transitions from _thread_in_native
 218   //
 219   // In general, java_suspend() does not wait for an external suspend
 220   // request to complete. When it returns, the only guarantee is that
 221   // the _external_suspend field is true.
 222   //
 223   // wait_for_ext_suspend_completion() is used to wait for an external
 224   // suspend request to complete. External suspend requests are usually
 225   // followed by some other interface call that requires the thread to
 226   // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
 227   // the interface that requires quiescence, we give the JavaThread a
 228   // chance to self-suspend before we need it to be quiescent. This
 229   // improves overall suspend/query performance.
 230   //
 231   // _suspend_flags controls the behavior of java_suspend()/java_resume().
 232   // It must be set under the protection of SR_lock. Reading the flag is
 233   // OK without SR_lock as long as the value is only used as a hint
 234   // (e.g., check _external_suspend first without the lock and then recheck
 235   // inside SR_lock to finish the suspension).
 236   //
 237   // _suspend_flags is also overloaded for other "special conditions" so
 238   // that a single check indicates whether any special action is needed
 239   // eg. for async exceptions.
 240   // -------------------------------------------------------------------
 241   // Notes:
 242   // 1. The suspend/resume logic no longer uses ThreadState in OSThread
 243   // but we still update its value to keep other parts of the system (mainly
 244   // JVMTI) happy. ThreadState is legacy code (see notes in
 245   // osThread.hpp).
 246   //
 247   // 2. It would be more natural if set_external_suspend() were private and
 248   // part of java_suspend(), but that probably would affect the suspend/query
 249   // performance. Need more investigation on this.
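       //
       // Requester-side sketch of the pattern described above (debug_bits is a
       // caller-provided local; see the SuspendRetry* flags):
       //
       //   java_thread->java_suspend();    // request: sets _external_suspend
       //   ...
       //   // later, just before an interface call that needs the target quiescent:
       //   uint32_t debug_bits = 0;
       //   if (!java_thread->is_thread_fully_suspended(true /* wait */, &debug_bits)) {
       //     // the target never self-suspended; report the error to the caller
       //   }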
 250 
 251   // suspend/resume lock: used for self-suspend
 252   Monitor* _SR_lock;
 253 
 254  protected:
 255   enum SuspendFlags {
 256     // NOTE: avoid using the sign-bit as cc generates different test code
 257     //       when the sign-bit is used, and sometimes incorrectly - see CR 6398077
 258 
 259     _external_suspend       = 0x20000000U, // thread is asked to self suspend
 260     _ext_suspended          = 0x40000000U, // thread has self-suspended
 261     _deopt_suspend          = 0x10000000U, // thread needs to self suspend for deopt
 262 
 263     _has_async_exception    = 0x00000001U, // there is a pending async exception
 264     _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock
 265 
 266     _trace_flag             = 0x00000004U  // call tracing backend
 267   };
 268 
 269   // various suspension related flags - atomically updated
 270   // overloaded for async exception checking in check_special_condition_for_native_trans.
 271   volatile uint32_t _suspend_flags;
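       //
       // set_suspend_flag()/clear_suspend_flag() (thread.inline.hpp) update this
       // word with a CAS retry loop; conceptually (sketch, not the exact code):
       //
       //   uint32_t flags;
       //   do {
       //     flags = _suspend_flags;
       //   } while (Atomic::cmpxchg((uint32_t)(flags | f), &_suspend_flags, flags) != flags);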
 272 
 273  private:
 274   int _num_nested_signal;
 275 
 276   DEBUG_ONLY(bool _suspendible_thread;)
 277 
 278  public:
 279   void enter_signal_handler() { _num_nested_signal++; }
 280   void leave_signal_handler() { _num_nested_signal--; }
 281   bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
 282 
 283 #ifdef ASSERT
 284   void set_suspendible_thread() {
 285     _suspendible_thread = true;
 286   }
 287 
 288   void clear_suspendible_thread() {
 289     _suspendible_thread = false;
 290   }
 291 
 292   bool is_suspendible_thread() { return _suspendible_thread; }
 293 #endif
 294 
 295  private:
 296   // Active_handles points to a block of handles
 297   JNIHandleBlock* _active_handles;
 298 
 299   // One-element thread local free list
 300   JNIHandleBlock* _free_handle_block;
 301 
 302   // Points to the last handle mark
 303   HandleMark* _last_handle_mark;
 304 
 305   // The parity of the last strong_roots iteration in which this thread was
 306   // claimed as a task.
 307   int _oops_do_parity;
 308 
 309   // Support for GlobalCounter
 310  private:
 311   volatile uintx _rcu_counter;
 312  public:
 313   volatile uintx* get_rcu_counter() {
 314     return &_rcu_counter;
 315   }
 316 
 317  public:
 318   void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
 319   HandleMark* last_handle_mark() const          { return _last_handle_mark; }
 320  private:
 321 
 322   // Debug support for checking whether code allows safepoints or not.
 323   // GC points in the VM can happen because of allocation, invoking a VM operation, blocking on a
 324   // mutex, or blocking on an object synchronizer (Java locking).
 325   // If !allow_safepoint(), then an assertion failure will happen in any of the above cases.
 326   // If !allow_allocation(), then an assertion failure will happen during allocation.
 327   // (Hence, !allow_safepoint() => !allow_allocation()).
 328   //
 329   // The two classes NoSafepointVerifier and NoAllocVerifier are used to set these counters.
 330   //
 331   NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, the thread allows a safepoint to happen
 332   debug_only(int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
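       // Illustrative usage (sketch): debug-only code asserts "no safepoint may
       // happen here" with a scoped verifier, e.g.
       //
       //   {
       //     NoSafepointVerifier nsv;
       //     ... // code that could reach a safepoint here fails an assert
       //   }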
 333 
 334   // Used by SkipGCALot class.
 335   NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 336 
 337   friend class NoAllocVerifier;
 338   friend class NoSafepointVerifier;
 339   friend class PauseNoSafepointVerifier;
 340   friend class GCLocker;
 341 
 342   volatile void* _polling_page;                 // Thread local polling page
 343 
 344   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
 345   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
 346                                                 // the Java heap
 347 
 348   ThreadStatisticInfo _statistic_info;          // Statistic info about the thread
 349 
 350   mutable TRACE_DATA _trace_data;               // Thread-local data for tracing
 351 
 352   int   _vm_operation_started_count;            // VM_Operation support
 353   int   _vm_operation_completed_count;          // VM_Operation support
 354 
 355   ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
 356                                                 // is waiting to lock
 357   bool _current_pending_monitor_is_from_java;   // locking is from Java code
 358 
 359   // ObjectMonitor on which this thread called Object.wait()
 360   ObjectMonitor* _current_waiting_monitor;
 361 
 362   // Private thread-local ObjectMonitor free list - a simple cache organized as an SLL (singly-linked list).
 363  public:
 364   ObjectMonitor* omFreeList;
 365   int omFreeCount;                              // length of omFreeList
 366   int omFreeProvision;                          // reload chunk size
 367   ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
 368   int omInUseCount;                             // length of omInUseList
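       // Illustrative allocation path (sketch; the real code is in
       // ObjectSynchronizer::omAlloc and friends):
       //
       //   ObjectMonitor* m = self->omFreeList;
       //   if (m != NULL) {
       //     self->omFreeList = m->FreeNext;   // pop the thread-local SLL
       //     self->omFreeCount--;
       //   } else {
       //     // refill roughly omFreeProvision monitors from the global free list
       //   }
       //   // monitors handed out by this thread are tracked on omInUseList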
 369 
 370 #ifdef ASSERT
 371  private:
 372   bool _visited_for_critical_count;
 373 
 374  public:
 375   void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
 376   bool was_visited_for_critical_count() const   { return _visited_for_critical_count; }
 377 #endif
 378 
 379  public:
 380   enum {
 381     is_definitely_current_thread = true
 382   };
 383 
 384   // Constructor
 385   Thread();
 386   virtual ~Thread();
 387 
 388   // Manage Thread::current()
 389   void initialize_thread_current();
 390   void clear_thread_current(); // TLS cleanup needed before threads terminate
 391 
 392  public:
 393   // thread entry point
 394   virtual void run();
 395 
 396   // Testers
 397   virtual bool is_VM_thread()       const            { return false; }
 398   virtual bool is_Java_thread()     const            { return false; }
 399   virtual bool is_Compiler_thread() const            { return false; }
 400   virtual bool is_Code_cache_sweeper_thread() const  { return false; }
 401   virtual bool is_hidden_from_external_view() const  { return false; }
 402   virtual bool is_jvmti_agent_thread() const         { return false; }
 403   // True iff the thread can perform GC operations at a safepoint.
 404   // Generally will be true only of VM thread and parallel GC WorkGang
 405   // threads.
 406   virtual bool is_GC_task_thread() const             { return false; }
 407   virtual bool is_Watcher_thread() const             { return false; }
 408   virtual bool is_ConcurrentGC_thread() const        { return false; }
 409   virtual bool is_Named_thread() const               { return false; }
 410   virtual bool is_Worker_thread() const              { return false; }
 411 
 412   // Can this thread make Java upcalls
 413   virtual bool can_call_java() const                 { return false; }
 414 
 415   // Casts
 416   virtual WorkerThread* as_Worker_thread() const     { return NULL; }
 417 
 418   virtual char* name() const { return (char*)"Unknown thread"; }
 419 
 420   // Returns the current thread (ASSERTS if NULL)
 421   static inline Thread* current();
 422   // Returns the current thread, or NULL if not attached
 423   static inline Thread* current_or_null();
 424   // Returns the current thread, or NULL if not attached, and is
 425   // safe for use from signal-handlers
 426   static inline Thread* current_or_null_safe();
 427 
 428   // Common thread operations
 429 #ifdef ASSERT
 430   static void check_for_dangling_thread_pointer(Thread *thread);
 431 #endif
 432   static void set_priority(Thread* thread, ThreadPriority priority);
 433   static ThreadPriority get_priority(const Thread* const thread);
 434   static void start(Thread* thread);
 435   static void interrupt(Thread* thr);
 436   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 437 
 438   void set_native_thread_name(const char *name) {
 439     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 440     os::set_native_thread_name(name);
 441   }
 442 
 443   ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
 444   Monitor* SR_lock() const                       { return _SR_lock; }
 445 
 446   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
 447 
 448   inline void set_suspend_flag(SuspendFlags f);
 449   inline void clear_suspend_flag(SuspendFlags f);
 450 
 451   inline void set_has_async_exception();
 452   inline void clear_has_async_exception();
 453 
 454   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 455 
 456   inline void set_critical_native_unlock();
 457   inline void clear_critical_native_unlock();
 458 
 459   inline void set_trace_flag();
 460   inline void clear_trace_flag();
 461 
 462   // Support for Unhandled Oop detection
 463   // Add the field for both fastdebug and debug builds to keep
 464   // Thread's field layout the same.
 465   // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
 466 #ifdef CHECK_UNHANDLED_OOPS
 467  private:
 468   UnhandledOops* _unhandled_oops;
 469 #elif defined(ASSERT)
 470  private:
 471   void* _unhandled_oops;
 472 #endif
 473 #ifdef CHECK_UNHANDLED_OOPS
 474  public:
 475   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 476   // Mark oop safe for gc.  It may be stack allocated but won't move.
 477   void allow_unhandled_oop(oop *op) {
 478     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 479   }
 480   // Clear oops at safepoint so crashes point to unhandled oop violator
 481   void clear_unhandled_oops() {
 482     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
 483   }
 484 #endif // CHECK_UNHANDLED_OOPS
 485 
 486  public:
 487 #ifndef PRODUCT
 488   bool skip_gcalot()           { return _skip_gcalot; }
 489   void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
 490 #endif
 491 
 492   // Installs a pending exception to be inserted later
 493   static void send_async_exception(oop thread_oop, oop java_throwable);
 494 
 495   // Resource area
 496   ResourceArea* resource_area() const            { return _resource_area; }
 497   void set_resource_area(ResourceArea* area)     { _resource_area = area; }
 498 
 499   OSThread* osthread() const                     { return _osthread;   }
 500   void set_osthread(OSThread* thread)            { _osthread = thread; }
 501 
 502   // JNI handle support
 503   JNIHandleBlock* active_handles() const         { return _active_handles; }
 504   void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
 505   JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
 506   void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
 507 
 508   // Internal handle support
 509   HandleArea* handle_area() const                { return _handle_area; }
 510   void set_handle_area(HandleArea* area)         { _handle_area = area; }
 511 
 512   GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
 513   void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
 514 
 515   // Thread-Local Allocation Buffer (TLAB) support
 516   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
 517   void initialize_tlab() {
 518     if (UseTLAB) {
 519       tlab().initialize();
 520     }
 521   }
 522 
 523   jlong allocated_bytes()               { return _allocated_bytes; }
 524   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 525   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
 526   inline jlong cooked_allocated_bytes();
 527 
 528   ThreadStatisticInfo& statistic_info() { return _statistic_info; }
 529 
 530   TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET;
 531   TRACE_DATA* trace_data() const        { return &_trace_data; }
 532   bool is_trace_suspend()               { return (_suspend_flags & _trace_flag) != 0; }
 533 
 534   // VM operation support
 535   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
 536   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
 537   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 538 
 539   // For tracking the heavyweight monitor the thread is pending on.
 540   ObjectMonitor* current_pending_monitor() {
 541     return _current_pending_monitor;
 542   }
 543   void set_current_pending_monitor(ObjectMonitor* monitor) {
 544     _current_pending_monitor = monitor;
 545   }
 546   void set_current_pending_monitor_is_from_java(bool from_java) {
 547     _current_pending_monitor_is_from_java = from_java;
 548   }
 549   bool current_pending_monitor_is_from_java() {
 550     return _current_pending_monitor_is_from_java;
 551   }
 552 
 553   // For tracking the ObjectMonitor on which this thread called Object.wait()
 554   ObjectMonitor* current_waiting_monitor() {
 555     return _current_waiting_monitor;
 556   }
 557   void set_current_waiting_monitor(ObjectMonitor* monitor) {
 558     _current_waiting_monitor = monitor;
 559   }
 560 
 561   // GC support
 562   // Apply "f->do_oop" to all root oops in "this".
 563   //   Used by JavaThread::oops_do.
 564   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
 565   virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);
 566 
 567   // Handles the parallel case for the method below.
 568  private:
 569   bool claim_oops_do_par_case(int collection_parity);
 570  public:
 571   // Requires that "collection_parity" is that of the current roots
 572   // iteration.  If "is_par" is false, sets the parity of "this" to
 573   // "collection_parity", and returns "true".  If "is_par" is true,
 574   // uses an atomic instruction to set the current thread's parity to
 575   // "collection_parity", if it is not already.  Returns "true" iff the
 576   // calling thread does the update; this indicates that the calling thread
 577   // has claimed the thread's stack as a root group in the current
 578   // collection.
 579   bool claim_oops_do(bool is_par, int collection_parity) {
 580     if (!is_par) {
 581       _oops_do_parity = collection_parity;
 582       return true;
 583     } else {
 584       return claim_oops_do_par_case(collection_parity);
 585     }
 586   }
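       // Illustrative use during a parallel roots iteration (sketch; see
       // Threads::possibly_parallel_oops_do for the real driver):
       //
       //   int parity = Threads::thread_claim_parity();
       //   if (t->claim_oops_do(true /* is_par */, parity)) {
       //     t->oops_do(&roots_closure, &code_closure);  // only the claimer scans t
       //   }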
 587 
 588   // jvmtiRedefineClasses support
 589   void metadata_handles_do(void f(Metadata*));
 590 
 591   // Used by fast lock support
 592   virtual bool is_lock_owned(address adr) const;
 593 
 594   // Check if address is in the stack of the thread (not just for locks).
 595   // Warning: the method can only be used on the running thread
 596   bool is_in_stack(address adr) const;
 597   // Check if address is in the usable part of the stack (excludes protected
 598   // guard pages)
 599   bool is_in_usable_stack(address adr) const;
 600 
 601   // Sets this thread as the starting thread. Returns failure if thread
 602   // creation fails due to lack of memory, too many threads, etc.
 603   bool set_as_starting_thread();
 604 
 605 protected:
 606   // OS data associated with the thread
 607   OSThread* _osthread;  // Platform-specific thread information
 608 
 609   // Thread local resource area for temporary allocation within the VM
 610   ResourceArea* _resource_area;
 611 
 612   DEBUG_ONLY(ResourceMark* _current_resource_mark;)
 613 
 614   // Thread local handle area for allocation of handles within the VM
 615   HandleArea* _handle_area;
 616   GrowableArray<Metadata*>* _metadata_handles;
 617 
 618   // Support for stack overflow handling, get_thread, etc.
 619   address          _stack_base;
 620   size_t           _stack_size;
 621   uintptr_t        _self_raw_id;      // used by get_thread (mutable)
 622   int              _lgrp_id;
 623 
 624   volatile void** polling_page_addr() { return &_polling_page; }
 625 
 626  public:
 627   // Stack overflow support
 628   address stack_base() const           { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
 629   void    set_stack_base(address base) { _stack_base = base; }
 630   size_t  stack_size() const           { return _stack_size; }
 631   void    set_stack_size(size_t size)  { _stack_size = size; }
 632   address stack_end()  const           { return stack_base() - stack_size(); }
 633   void    record_stack_base_and_size();
 634 
 635   bool    on_local_stack(address adr) const {
 636     // QQQ this has knowledge of direction, ought to be a stack method
 637     return (_stack_base >= adr && adr >= stack_end());
 638   }
 639 
 640   uintptr_t self_raw_id()                    { return _self_raw_id; }
 641   void      set_self_raw_id(uintptr_t value) { _self_raw_id = value; }
 642 
 643   int     lgrp_id() const        { return _lgrp_id; }
 644   void    set_lgrp_id(int value) { _lgrp_id = value; }
 645 
 646   // Printing
 647   virtual void print_on(outputStream* st) const;
 648   virtual void print_nested_threads_hazard_ptrs_on(outputStream* st) const;
 649   void print() const { print_on(tty); }
 650   virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
 651   void print_value_on(outputStream* st) const;
 652 
 653   // Debug-only code
 654 #ifdef ASSERT
 655  private:
 656   // Deadlock detection support for Mutex locks. List of locks owned by this thread.
 657   Monitor* _owned_locks;
 658   // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
 659   // thus the friendship
 660   friend class Mutex;
 661   friend class Monitor;
 662 
 663  public:
 664   void print_owned_locks_on(outputStream* st) const;
 665   void print_owned_locks() const                 { print_owned_locks_on(tty);    }
 666   Monitor* owned_locks() const                   { return _owned_locks;          }
 667   bool owns_locks() const                        { return owned_locks() != NULL; }
 668   bool owns_locks_but_compiled_lock() const;
 669   int oops_do_parity() const                     { return _oops_do_parity; }
 670 
 671   // Deadlock detection
 672   bool allow_allocation()                        { return _allow_allocation_count == 0; }
 673   ResourceMark* current_resource_mark()          { return _current_resource_mark; }
 674   void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
 675 #endif
 676 
 677   void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
 678 
 679  private:
 680   volatile int _jvmti_env_iteration_count;
 681 
 682  public:
 683   void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
 684   void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
 685   bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }
 686 
 687   // Code generation
 688   static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file); }
 689   static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line); }
 690   static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles); }
 691 
 692   static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
 693   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }
 694 
 695   static ByteSize polling_page_offset()          { return byte_offset_of(Thread, _polling_page); }
 696 
 697 #define TLAB_FIELD_OFFSET(name) \
 698   static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
 699 
 700   TLAB_FIELD_OFFSET(start)
 701   TLAB_FIELD_OFFSET(end)
 702   TLAB_FIELD_OFFSET(top)
 703   TLAB_FIELD_OFFSET(pf_top)
 704   TLAB_FIELD_OFFSET(size)                   // desired_size
 705   TLAB_FIELD_OFFSET(refill_waste_limit)
 706   TLAB_FIELD_OFFSET(number_of_refills)
 707   TLAB_FIELD_OFFSET(fast_refill_waste)
 708   TLAB_FIELD_OFFSET(slow_allocations)
 709 
 710 #undef TLAB_FIELD_OFFSET
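       // For example, TLAB_FIELD_OFFSET(top) above expands to tlab_top_offset(),
       // i.e. byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(),
       // which lets generated code address the TLAB fields directly off the
       // thread register.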
 711 
 712   static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes); }
 713 
 714  public:
 715   volatile intptr_t _Stalled;
 716   volatile int _TypeTag;
 717   ParkEvent * _ParkEvent;                     // for synchronized()
 718   ParkEvent * _SleepEvent;                    // for Thread.sleep
 719   ParkEvent * _MutexEvent;                    // for native internal Mutex/Monitor
 720   ParkEvent * _MuxEvent;                      // for low-level muxAcquire-muxRelease
 721   int NativeSyncRecursion;                    // diagnostic
 722 
 723   volatile int _OnTrap;                       // Resume-at IP delta
 724   jint _hashStateW;                           // Marsaglia Shift-XOR thread-local RNG
 725   jint _hashStateX;                           // thread-specific hashCode generator state
 726   jint _hashStateY;
 727   jint _hashStateZ;
 728   void * _schedctl;
 729 
 730 
 731   volatile jint rng[4];                      // RNG for spin loop
 732 
 733   // Low-level leaf-lock primitives used to implement synchronization
 734   // and native monitor-mutex infrastructure.
 735   // Not for general synchronization use.
 736   static void SpinAcquire(volatile int * Lock, const char * Name);
 737   static void SpinRelease(volatile int * Lock);
 738   static void muxAcquire(volatile intptr_t * Lock, const char * Name);
 739   static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev);
 740   static void muxRelease(volatile intptr_t * Lock);
 741 };
 742 
 743 // Inline implementation of Thread::current()
 744 inline Thread* Thread::current() {
 745   Thread* current = current_or_null();
 746   assert(current != NULL, "Thread::current() called on detached thread");
 747   return current;
 748 }
 749 
 750 inline Thread* Thread::current_or_null() {
 751 #ifndef USE_LIBRARY_BASED_TLS_ONLY
 752   return _thr_current;
 753 #else
 754   if (ThreadLocalStorage::is_initialized()) {
 755     return ThreadLocalStorage::thread();
 756   }
 757   return NULL;
 758 #endif
 759 }
 760 
 761 inline Thread* Thread::current_or_null_safe() {
 762   if (ThreadLocalStorage::is_initialized()) {
 763     return ThreadLocalStorage::thread();
 764   }
 765   return NULL;
 766 }
 767 
 768 // Name support for threads.  Non-JavaThread subclasses with multiple
 769 // uniquely named instances should derive from this.
 770 class NamedThread: public Thread {
 771   friend class VMStructs;
 772   enum {
 773     max_name_len = 64
 774   };
 775  private:
 776   char* _name;
 777   // log JavaThread being processed by oops_do
 778   JavaThread* _processed_thread;
 779   uint _gc_id; // The current GC id when a thread takes part in GC
 780 
 781  public:
 782   NamedThread();
 783   ~NamedThread();
 784   // May only be called once per thread.
 785   void set_name(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3);
 786   void initialize_named_thread();
 787   virtual bool is_Named_thread() const { return true; }
 788   virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
 789   JavaThread *processed_thread() { return _processed_thread; }
 790   void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
 791   virtual void print_on(outputStream* st) const;
 792 
 793   void set_gc_id(uint gc_id) { _gc_id = gc_id; }
 794   uint gc_id() { return _gc_id; }
 795 };
 796 
 797 // Worker threads are named and have an id for their assigned work.
 798 class WorkerThread: public NamedThread {
 799  private:
 800   uint _id;
 801  public:
 802   WorkerThread() : _id(0)               { }
 803   virtual bool is_Worker_thread() const { return true; }
 804 
 805   virtual WorkerThread* as_Worker_thread() const {
 806     assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
 807     return (WorkerThread*) this;
 808   }
 809 
 810   void set_id(uint work_id)             { _id = work_id; }
 811   uint id() const                       { return _id; }
 812 };
 813 
 814 // A single WatcherThread is used for simulating timer interrupts.
 815 class WatcherThread: public Thread {
 816   friend class VMStructs;
 817  public:
 818   virtual void run();
 819 
 820  private:
 821   static WatcherThread* _watcher_thread;
 822 
 823   static bool _startable;
 824   // volatile due to at least one lock-free read
 825   volatile static bool _should_terminate;
 826  public:
 827   enum SomeConstants {
 828     delay_interval = 10                          // interrupt delay in milliseconds
 829   };
 830 
 831   // Constructor
 832   WatcherThread();
 833 
 834   // No destruction allowed
 835   ~WatcherThread() {
 836     guarantee(false, "WatcherThread deletion must fix the race with VM termination");
 837   }
 838 
 839   // Tester
 840   bool is_Watcher_thread() const                 { return true; }
 841 
 842   // Printing
 843   char* name() const { return (char*)"VM Periodic Task Thread"; }
 844   void print_on(outputStream* st) const;
 845   void unpark();
 846 
 847   // Returns the single instance of WatcherThread
 848   static WatcherThread* watcher_thread()         { return _watcher_thread; }
 849 
 850   // Create and start the single instance of WatcherThread, or stop it on shutdown
 851   static void start();
 852   static void stop();
 853   // Only allow start once the VM is sufficiently initialized
 854   // Otherwise the first task to enroll will trigger the start
 855   static void make_startable();
 856  private:
 857   int sleep() const;
 858 };
 859 
 860 
 861 class CompilerThread;
 862 
 863 typedef void (*ThreadFunction)(JavaThread*, TRAPS);
 864 
 865 class JavaThread: public Thread {
 866   friend class VMStructs;
 867   friend class JVMCIVMStructs;
 868   friend class WhiteBox;
 869  private:
 870   JavaThread*    _next;                          // The next thread in the Threads list
 871   bool           _on_thread_list;                // Is set when this JavaThread is added to the Threads list
 872   oop            _threadObj;                     // The Java level thread object
 873 
 874 #ifdef ASSERT
 875  private:
 876   int _java_call_counter;
 877 
 878  public:
 879   int  java_call_counter()                       { return _java_call_counter; }
 880   void inc_java_call_counter()                   { _java_call_counter++; }
 881   void dec_java_call_counter() {
 882     assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
 883     _java_call_counter--;
 884   }
 885  private:  // restore original namespace restriction
 886 #endif  // ifdef ASSERT
 887 
 888 #ifndef PRODUCT
 889  public:
 890   enum {
 891     jump_ring_buffer_size = 16
 892   };
 893  private:  // restore original namespace restriction
 894 #endif
 895 
 896   JavaFrameAnchor _anchor;                       // Encapsulation of current Java frame and its state
 897 
 898   ThreadFunction _entry_point;
 899 
 900   JNIEnv        _jni_environment;
 901 
 902   // Deopt support
 903   DeoptResourceMark*  _deopt_mark;               // Holds special ResourceMark for deoptimization
 904 
 905   intptr_t*      _must_deopt_id;                 // id of frame that needs to be deopted once we
 906                                                  // transition out of native
 907   CompiledMethod*       _deopt_nmethod;         // CompiledMethod that is currently being deoptimized
 908   vframeArray*  _vframe_array_head;              // Holds the heap of the active vframeArrays
 909   vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
 910   // Because deoptimization is lazy we must save JVMTI requests to set locals
 911   // in compiled frames until we deoptimize and have an interpreter frame.
 912   // This holds the pointer to an array (yeah, like there might be more than one) of
 913   // descriptions of compiled vframes that have locals that need to be updated.
 914   GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;
 915 
 916   // Handshake value for fixing 6243940. We need a place for the i2c
 917   // adapter to store the callee Method*. This value is NEVER live
 918   // across a gc point so it does NOT have to be gc'd
 919   // The handshake is open ended since we can't be certain that it will
 920   // be NULLed. This is because we rarely ever see the race and end up
 921   // in handle_wrong_method which is the backend of the handshake. See
 922   // code in i2c adapters and handle_wrong_method.
 923 
 924   Method*       _callee_target;
 925 
 926   // Used to pass back results to the interpreter or generated code running Java code.
 927   oop           _vm_result;    // oop result is GC-preserved
 928   Metadata*     _vm_result_2;  // non-oop result
 929 
 930   // See ReduceInitialCardMarks: this holds the precise space interval of
 931   // the most recent slow path allocation for which compiled code has
 932   // elided card-marks for performance along the fast-path.
 933   MemRegion     _deferred_card_mark;
 934 
 935   MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors
 936                                                  // allocated during deoptimization
 937                                                  // and by JNI_MonitorEnter/Exit
 938 
 939   // Async. requests support
 940   enum AsyncRequests {
 941     _no_async_condition = 0,
 942     _async_exception,
 943     _async_unsafe_access_error
 944   };
 945   AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
 946   oop           _pending_async_exception;
 947 
 948   // Safepoint support
 949  public:                                         // Expose _thread_state for SafeFetchInt()
 950   volatile JavaThreadState _thread_state;
 951  private:
 952   ThreadSafepointState *_safepoint_state;        // Holds information about a thread during a safepoint
 953   address               _saved_exception_pc;     // Saved pc of instruction where last implicit exception happened
 954 
 955   // JavaThread termination support
 956   enum TerminatedTypes {
 957     _not_terminated = 0xDEAD - 2,
 958     _thread_exiting,                             // JavaThread::exit() has been called for this thread
 959     _thread_terminated,                          // JavaThread is removed from thread list
 960     _vm_exited                                   // JavaThread is still executing native code, but VM is terminated
 961                                                  // only VM_Exit can set _vm_exited
 962   };
 963 
 964   // In general a JavaThread's _terminated field transitions as follows:
 965   //
 966   //   _not_terminated => _thread_exiting => _thread_terminated
 967   //
 968   // _vm_exited is a special value to cover the case of a JavaThread
 969   // executing native code after the VM itself is terminated.
 970   volatile TerminatedTypes _terminated;
 971   // suspend/resume support
 972   volatile bool         _suspend_equivalent;     // Suspend equivalent condition
 973   jint                  _in_deopt_handler;       // count of deoptimization
 974                                                  // handlers thread is in
 975   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
 976   bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
 977                                                          // never locked) when throwing an exception. Used by interpreter only.
 978 
 979   // JNI attach states:
 980   enum JNIAttachStates {
 981     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
 982     _attaching_via_jni,          // thread is attaching via JNI
 983     _attached_via_jni            // thread has attached via JNI
 984   };
 985 
 986   // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
 987   // A native thread that is attaching via JNI starts with a value
 988   // of _attaching_via_jni and transitions to _attached_via_jni.
 989   volatile JNIAttachStates _jni_attach_state;
 990 
 991  public:
 992   // State of the stack guard pages for this thread.
 993   enum StackGuardState {
 994     stack_guard_unused,         // not needed
 995     stack_guard_reserved_disabled,
 996     stack_guard_yellow_reserved_disabled,// disabled (temporarily) after stack overflow
 997     stack_guard_enabled         // enabled
 998   };
 999 
1000  private:
1001 
1002 #if INCLUDE_JVMCI
1003   // The _pending_* fields below are used to communicate extra information
1004   // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.
1005 
1006   // Communicates the DeoptReason and DeoptAction of the uncommon trap
1007   int       _pending_deoptimization;
1008 
1009   // Specifies whether the uncommon trap is to bci 0 of a synchronized method
1010   // before the monitor has been acquired.
1011   bool      _pending_monitorenter;
1012 
1013   // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
1014   bool      _pending_transfer_to_interpreter;
1015 
1016   // Guard for re-entrant call to JVMCIRuntime::adjust_comp_level
1017   bool      _adjusting_comp_level;
1018 
1019   // An object that JVMCI compiled code can use to further describe and
1020   // uniquely identify the speculative optimization guarded by the uncommon trap
1021   oop       _pending_failed_speculation;
1022 
1023   // These fields are mutually exclusive in terms of live ranges.
1024   union {
1025     // Communicates the pc at which the most recent implicit exception occurred
1026     // from the signal handler to a deoptimization stub.
1027     address   _implicit_exception_pc;
1028 
1029     // Communicates an alternative call target to an i2c stub from a JavaCall.
1030     address   _alternate_call_target;
1031   } _jvmci;
1032 
1033   // Support for high precision, thread sensitive counters in JVMCI compiled code.
1034   jlong*    _jvmci_counters;
1035 
1036  public:
1037   static jlong* _jvmci_old_thread_counters;
1038   static void collect_counters(typeArrayOop array);
1039  private:
1040 #endif // INCLUDE_JVMCI
1041 
1042   StackGuardState  _stack_guard_state;
1043 
1044   // Precompute the limit of the stack as used in stack overflow checks.
1045   // We load it from here to simplify the stack overflow check in assembly.
1046   address          _stack_overflow_limit;
1047   address          _reserved_stack_activation;
1048 
1049   // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
1050   // used to temporarily pass values into and out of the runtime system during exception handling for compiled
1051   // code)
1052   volatile oop     _exception_oop;               // Exception thrown in compiled code
1053   volatile address _exception_pc;                // PC where exception happened
1054   volatile address _exception_handler_pc;        // PC for handler of exception
1055   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
1056 
1057  private:
1058   // support for JNI critical regions
1059   jint    _jni_active_critical;                  // count of entries into JNI critical region
1060 
1061   // Checked JNI: function name requires exception check
1062   char* _pending_jni_exception_check_fn;
1063 
1064   // For deadlock detection.
1065   int _depth_first_number;
1066 
1067   // JVMTI PopFrame support
1068   // This is set to popframe_pending to signal that top Java frame should be popped immediately
1069   int _popframe_condition;
1070 
1071   // If reallocation of scalar replaced objects fails, we throw OOM
1072   // and during exception propagation, pop the top
1073   // _frames_to_pop_failed_realloc frames, the ones that reference
1074   // failed reallocations.
1075   int _frames_to_pop_failed_realloc;
1076 
1077 #ifndef PRODUCT
1078   int _jmp_ring_index;
1079   struct {
1080     // We use intptr_t instead of address so debugger doesn't try and display strings
1081     intptr_t _target;
1082     intptr_t _instruction;
1083     const char*  _file;
1084     int _line;
1085   }   _jmp_ring[jump_ring_buffer_size];
1086 #endif // PRODUCT
1087 
1088   friend class VMThread;
1089   friend class ThreadWaitTransition;
1090   friend class VM_Exit;
1091 
1092   void initialize();                             // Initializes the instance variables
1093 
1094  public:
1095   // Constructor
1096   JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
1097   JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
1098   ~JavaThread();
1099 
1100 #ifdef ASSERT
1101   // verify this JavaThread hasn't been published in the Threads::list yet
1102   void verify_not_published();
1103 #endif
1104 
1105   // JNI function table getter/setter for the JVMTI JNI function table interception API.
1106   void set_jni_functions(struct JNINativeInterface_* functionTable) {
1107     _jni_environment.functions = functionTable;
1108   }
1109   struct JNINativeInterface_* get_jni_functions() {
1110     return (struct JNINativeInterface_ *)_jni_environment.functions;
1111   }
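       // Illustrative interception sketch (make_hooked_copy is a hypothetical
       // helper): an agent installs a per-thread copy of the table with some
       // entries replaced:
       //
       //   struct JNINativeInterface_* orig   = thread->get_jni_functions();
       //   struct JNINativeInterface_* hooked = make_hooked_copy(orig);
       //   thread->set_jni_functions(hooked);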
1112 
1113   // This function is called at thread creation to allow
1114   // platform specific thread variables to be initialized.
1115   void cache_global_variables();
1116 
1117   // Executes Shutdown.shutdown()
1118   void invoke_shutdown_hooks();
1119 
1120   // Cleanup on thread exit
1121   enum ExitType {
1122     normal_exit,
1123     jni_detach
1124   };
1125   void exit(bool destroy_vm, ExitType exit_type = normal_exit);
1126 
1127   void cleanup_failed_attach_current_thread();
1128 
1129   // Testers
1130   virtual bool is_Java_thread() const            { return true;  }
1131   virtual bool can_call_java() const             { return true; }
1132 
1133   // Thread chain operations
1134   JavaThread* next() const                       { return _next; }
1135   void set_next(JavaThread* p)                   { _next = p; }
1136 
1137   // Thread oop. threadObj() can be NULL for initial JavaThread
1138   // (or for threads attached via JNI)
1139   oop threadObj() const                          { return _threadObj; }
1140   void set_threadObj(oop p)                      { _threadObj = p; }
1141 
1142   ThreadPriority java_priority() const;          // Read from threadObj()
1143 
1144   // Prepare thread and add to priority queue.  If a priority is
1145   // not specified, use the priority of the thread object. Threads_lock
1146   // must be held while this function is called.
1147   void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
1148 
1149   void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
1150   address saved_exception_pc()                   { return _saved_exception_pc; }
1151 
1152 
1153   ThreadFunction entry_point() const             { return _entry_point; }
1154 
1155   // Allocates a new Java level thread object for this thread. thread_name may be NULL.
1156   void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS);
1157 
1158   // Last frame anchor routines
1159 
1160   JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }
1161 
1162   // last_Java_sp
1163   bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
1164   intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }
1165 
1166   // last_Java_pc
1167 
1168   address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }
1169 
1170   // Safepoint support
1171 #if !(defined(PPC64) || defined(AARCH64))
1172   JavaThreadState thread_state() const           { return _thread_state; }
1173   void set_thread_state(JavaThreadState s)       { _thread_state = s;    }
1174 #else
1175   // Use membars when accessing volatile _thread_state. See
1176   // Threads::create_vm() for size checks.
1177   inline JavaThreadState thread_state() const;
1178   inline void set_thread_state(JavaThreadState s);
1179 #endif
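       // On PPC64/AARCH64 the inline definitions (thread.inline.hpp) pair the
       // store with a release and the load with an acquire; conceptually (sketch):
       //
       //   return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
       //   OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);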
1180   ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
1181   void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
1182   bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }
1183 
1184   // JavaThread termination and lifecycle support:
1185   void smr_delete();
1186   bool on_thread_list() const { return _on_thread_list; }
1187   void set_on_thread_list() { _on_thread_list = true; }
1188 
1189   // thread has called JavaThread::exit() or is terminated
1190   bool is_exiting() const;
1191   // thread is terminated (no longer on the threads list); we compare
1192   // against the two non-terminated values so that a freed JavaThread
1193   // will also be considered terminated.
1194   bool check_is_terminated(TerminatedTypes l_terminated) const {
1195     return l_terminated != _not_terminated && l_terminated != _thread_exiting;
1196   }
1197   bool is_terminated() const;
1198   void set_terminated(TerminatedTypes t);
1199   // special for Threads::remove() which is static:
1200   void set_terminated_value();
1201   void block_if_vm_exited();
1202 
1203   bool doing_unsafe_access()                     { return _doing_unsafe_access; }
1204   void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
1205 
1206   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
1207   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
1208 
1209   inline void set_polling_page(void* poll_value);
1210   inline volatile void* get_polling_page();
1211 
1212  private:
1213   // Support for thread handshake operations
1214   HandshakeState _handshake;
1215  public:
1216   void set_handshake_operation(HandshakeOperation* op) {
1217     _handshake.set_operation(this, op);
1218   }
1219 
1220   bool has_handshake() const {
1221     return _handshake.has_operation();
1222   }
1223 
1224   void cancel_handshake() {
1225     _handshake.cancel(this);
1226   }
1227 
1228   void handshake_process_by_self() {
1229     _handshake.process_by_self(this);
1230   }
1231 
1232   void handshake_process_by_vmthread() {
1233     _handshake.process_by_vmthread(this);
1234   }
1235 
1236   // Suspend/resume support for JavaThread
1237  private:
1238   inline void set_ext_suspended();
1239   inline void clear_ext_suspended();
1240 
1241  public:
1242   void java_suspend();
1243   void java_resume();
1244   int  java_suspend_self();
1245 
1246   void check_and_wait_while_suspended() {
1247     assert(JavaThread::current() == this, "sanity check");
1248 
1249     bool do_self_suspend;
1250     do {
1251       // were we externally suspended while we were waiting?
1252       do_self_suspend = handle_special_suspend_equivalent_condition();
1253       if (do_self_suspend) {
1254         // don't surprise the thread that suspended us by returning
1255         java_suspend_self();
1256         set_suspend_equivalent();
1257       }
1258     } while (do_self_suspend);
1259   }
1260   static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
1261   // Check for async exception in addition to safepoint and suspend request.
1262   static void check_special_condition_for_native_trans(JavaThread *thread);
1263 
1264   // Same as check_special_condition_for_native_trans but finishes the
1265   // transition into thread_in_Java mode so that it can potentially
1266   // block.
1267   static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);
1268 
1269   bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
1270   bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
1271     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1272     // Warning: is_ext_suspend_completed() may temporarily drop the
1273     // SR_lock to allow the thread to reach a stable thread state if
1274     // it is currently in a transient thread state.
1275     return is_ext_suspend_completed(false /* !called_by_wait */,
1276                                     SuspendRetryDelay, bits);
1277   }
1278 
1279   // We cannot allow wait_for_ext_suspend_completion() to run forever or
1280   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
1281   // passed as the count and delay parameters. Experiments with specific
1282   // calls to wait_for_ext_suspend_completion() can be done by passing
1283   // other values in the code. Experiments with all calls can be done
1284   // via the appropriate -XX options.
1285   bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
1286 
1287   // test for suspend - most (all?) of these should go away
1288   bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);
1289 
1290   inline void set_external_suspend();
1291   inline void clear_external_suspend();
1292 
1293   inline void set_deopt_suspend();
1294   inline void clear_deopt_suspend();
1295   bool is_deopt_suspend()         { return (_suspend_flags & _deopt_suspend) != 0; }
1296 
1297   bool is_external_suspend() const {
1298     return (_suspend_flags & _external_suspend) != 0;
1299   }
1300   // Whenever a thread transitions from native to vm/java it must suspend
1301   // if external|deopt suspend is present.
1302   bool is_suspend_after_native() const {
1303     return (_suspend_flags & (_external_suspend | _deopt_suspend)) != 0;
1304   }
1305 
1306   // external suspend request is completed
1307   bool is_ext_suspended() const {
1308     return (_suspend_flags & _ext_suspended) != 0;
1309   }
1310 
1311   bool is_external_suspend_with_lock() const {
1312     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1313     return is_external_suspend();
1314   }
1315 
1316   // Special method to handle a pending external suspend request
1317   // when a suspend equivalent condition lifts.
1318   bool handle_special_suspend_equivalent_condition() {
1319     assert(is_suspend_equivalent(),
1320            "should only be called in a suspend equivalence condition");
1321     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1322     bool ret = is_external_suspend();
1323     if (!ret) {
1324       // not about to self-suspend so clear suspend equivalence
1325       clear_suspend_equivalent();
1326     }
1327     // implied else:
1328     // We have a pending external suspend request so we leave the
1329     // suspend_equivalent flag set until java_suspend_self() sets
1330     // the ext_suspended flag and clears the suspend_equivalent
1331     // flag. This ensures that wait_for_ext_suspend_completion()
1332     // will return consistent values.
1333     return ret;
1334   }
1335 
1336   // utility methods to see if we are doing some kind of suspension
1337   bool is_being_ext_suspended() const            {
1338     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1339     return is_ext_suspended() || is_external_suspend();
1340   }
1341 
1342   bool is_suspend_equivalent() const             { return _suspend_equivalent; }
1343 
1344   void set_suspend_equivalent()                  { _suspend_equivalent = true; }
1345   void clear_suspend_equivalent()                { _suspend_equivalent = false; }
1346 
1347   // Thread.stop support
1348   void send_thread_stop(oop throwable);
1349   AsyncRequests clear_special_runtime_exit_condition() {
1350     AsyncRequests x = _special_runtime_exit_condition;
1351     _special_runtime_exit_condition = _no_async_condition;
1352     return x;
1353   }
1354 
1355   // Are any async conditions present?
1356   bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }
1357 
1358   void check_and_handle_async_exceptions(bool check_unsafe_error = true);
1359 
1360   // these next two are also used for self-suspension and async exception support
1361   void handle_special_runtime_exit_condition(bool check_asyncs = true);
1362 
1363   // Return true if JavaThread has an asynchronous condition or
1364   // if external suspension is requested.
1365   bool has_special_runtime_exit_condition() {
1366     // Because we don't use is_external_suspend_with_lock,
1367     // it is possible that we won't see an asynchronous external suspend
1368     // request that has just gotten started, i.e., SR_lock grabbed but
1369     // the _external_suspend field change either not made yet or not
1370     // visible yet. However, this is okay because the request is
1371     // asynchronous and we will see the new flag value the next time
1372     // through. It's also possible that the external suspend request is
1373     // dropped after we have checked is_external_suspend(); in that case
1374     // we will recheck its value under SR_lock in java_suspend_self().
1375     return (_special_runtime_exit_condition != _no_async_condition) ||
1376             is_external_suspend() || is_trace_suspend();
1377   }
1378 
1379   void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
1380 
1381   inline void set_pending_async_exception(oop e);
1382 
1383   // Fast-locking support
1384   bool is_lock_owned(address adr) const;
1385 
1386   // Accessors for vframe array top
1387   // The linked list of vframe arrays is sorted on sp. This means that when we
1388   // unpack, the head must contain the vframe array to unpack.
1389   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
1390   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
1391 
1392   // Side structure for deferring update of java frame locals until deopt occurs
1393   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
1394   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
1395 
1396   // These only really exist to make debugging deopt problems simpler
1397 
1398   void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
1399   vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
1400 
1401   // The special resourceMark used during deoptimization
1402 
1403   void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
1404   DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }
1405 
1406   intptr_t* must_deopt_id()                      { return _must_deopt_id; }
1407   void     set_must_deopt_id(intptr_t* id)       { _must_deopt_id = id; }
1408   void     clear_must_deopt_id()                 { _must_deopt_id = NULL; }
1409 
1410   void set_deopt_compiled_method(CompiledMethod* nm)  { _deopt_nmethod = nm; }
1411   CompiledMethod* deopt_compiled_method()        { return _deopt_nmethod; }
1412 
1413   Method*    callee_target() const               { return _callee_target; }
1414   void set_callee_target  (Method* x)          { _callee_target   = x; }
1415 
1416   // Oop results of vm runtime calls
1417   oop  vm_result() const                         { return _vm_result; }
1418   void set_vm_result  (oop x)                    { _vm_result   = x; }
1419 
1420   Metadata*    vm_result_2() const               { return _vm_result_2; }
1421   void set_vm_result_2  (Metadata* x)          { _vm_result_2   = x; }
1422 
1423   MemRegion deferred_card_mark() const           { return _deferred_card_mark; }
1424   void set_deferred_card_mark(MemRegion mr)      { _deferred_card_mark = mr;   }
1425 
1426 #if INCLUDE_JVMCI
1427   int  pending_deoptimization() const             { return _pending_deoptimization; }
1428   oop  pending_failed_speculation() const         { return _pending_failed_speculation; }
1429   bool adjusting_comp_level() const               { return _adjusting_comp_level; }
1430   void set_adjusting_comp_level(bool b)           { _adjusting_comp_level = b; }
1431   bool has_pending_monitorenter() const           { return _pending_monitorenter; }
1432   void set_pending_monitorenter(bool b)           { _pending_monitorenter = b; }
1433   void set_pending_deoptimization(int reason)     { _pending_deoptimization = reason; }
1434   void set_pending_failed_speculation(oop failed_speculation) { _pending_failed_speculation = failed_speculation; }
1435   void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; }
1436   void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; }
1437   void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; }
1438 #endif // INCLUDE_JVMCI
1439 
1440   // Exception handling for compiled methods
1441   oop      exception_oop() const                 { return _exception_oop; }
1442   address  exception_pc() const                  { return _exception_pc; }
1443   address  exception_handler_pc() const          { return _exception_handler_pc; }
1444   bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
1445 
1446   void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
1447   void set_exception_pc(address a)               { _exception_pc = a; }
1448   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
1449   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
1450 
1451   void clear_exception_oop_and_pc() {
1452     set_exception_oop(NULL);
1453     set_exception_pc(NULL);
1454   }
1455 
1456   // Stack overflow support
1457   //
1458   //  (small addresses)
1459   //
1460   //  --  <-- stack_end()                   ---
1461   //  |                                      |
1462   //  |  red pages                           |
1463   //  |                                      |
1464   //  --  <-- stack_red_zone_base()          |
1465   //  |                                      |
1466   //  |                                     guard
1467   //  |  yellow pages                       zone
1468   //  |                                      |
1469   //  |                                      |
1470   //  --  <-- stack_yellow_zone_base()       |
1471   //  |                                      |
1472   //  |                                      |
1473   //  |  reserved pages                      |
1474   //  |                                      |
1475   //  --  <-- stack_reserved_zone_base()    ---      ---
1476   //                                                 /|\  shadow     <--  stack_overflow_limit() (somewhere in here)
1477   //                                                  |   zone
1478   //                                                 \|/  size
1479   //  some untouched memory                          ---
1480   //
1481   //
1482   //  --
1483   //  |
1484   //  |  shadow zone
1485   //  |
1486   //  --
1487   //  x    frame n
1488   //  --
1489   //  x    frame n-1
1490   //  x
1491   //  --
1492   //  ...
1493   //
1494   //  --
1495   //  x    frame 0
1496   //  --  <-- stack_base()
1497   //
1498   //  (large addresses)
1499   //
1500 
1501  private:
1502   // These values are derived from flags StackRedPages, StackYellowPages,
1503   // StackReservedPages and StackShadowPages. The zone size is determined
1504   // ergonomically if page_size > 4K.
1505   static size_t _stack_red_zone_size;
1506   static size_t _stack_yellow_zone_size;
1507   static size_t _stack_reserved_zone_size;
1508   static size_t _stack_shadow_zone_size;
1509  public:
1510   inline size_t stack_available(address cur_sp);
1511 
1512   static size_t stack_red_zone_size() {
1513     assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
1514     return _stack_red_zone_size;
1515   }
1516   static void set_stack_red_zone_size(size_t s) {
1517     assert(is_aligned(s, os::vm_page_size()),
1518            "We cannot protect if the red zone size is not page aligned.");
1519     assert(_stack_red_zone_size == 0, "This should be called only once.");
1520     _stack_red_zone_size = s;
1521   }
1522   address stack_red_zone_base() {
1523     return (address)(stack_end() + stack_red_zone_size());
1524   }
1525   bool in_stack_red_zone(address a) {
1526     return a <= stack_red_zone_base() && a >= stack_end();
1527   }
1528 
1529   static size_t stack_yellow_zone_size() {
1530     assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
1531     return _stack_yellow_zone_size;
1532   }
1533   static void set_stack_yellow_zone_size(size_t s) {
1534     assert(is_aligned(s, os::vm_page_size()),
1535            "We cannot protect if the yellow zone size is not page aligned.");
1536     assert(_stack_yellow_zone_size == 0, "This should be called only once.");
1537     _stack_yellow_zone_size = s;
1538   }
1539 
1540   static size_t stack_reserved_zone_size() {
1541     // _stack_reserved_zone_size may be 0. This indicates the feature is off.
1542     return _stack_reserved_zone_size;
1543   }
1544   static void set_stack_reserved_zone_size(size_t s) {
1545     assert(is_aligned(s, os::vm_page_size()),
1546            "We cannot protect if the reserved zone size is not page aligned.");
1547     assert(_stack_reserved_zone_size == 0, "This should be called only once.");
1548     _stack_reserved_zone_size = s;
1549   }
1550   address stack_reserved_zone_base() {
1551     return (address)(stack_end() +
1552                      (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
1553   }
1554   bool in_stack_reserved_zone(address a) {
1555     return (a <= stack_reserved_zone_base()) &&
1556            (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
1557   }
1558 
1559   static size_t stack_yellow_reserved_zone_size() {
1560     return _stack_yellow_zone_size + _stack_reserved_zone_size;
1561   }
1562   bool in_stack_yellow_reserved_zone(address a) {
1563     return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
1564   }
1565 
1566   // Size of red + yellow + reserved zones.
1567   static size_t stack_guard_zone_size() {
1568     return stack_red_zone_size() + stack_yellow_reserved_zone_size();
1569   }
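
       // Worked example (illustrative only; the actual sizes are derived
       // ergonomically from StackRedPages, StackYellowPages, StackReservedPages
       // and the page size): assuming 4K pages with 1 red, 2 yellow and 1
       // reserved page, the accessors above yield
       //
       //   stack_red_zone_size()             ==  4K
       //   stack_yellow_reserved_zone_size() == 12K   // 8K yellow + 4K reserved
       //   stack_guard_zone_size()           == 16K   // red + yellow + reserved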
1570 
1571   static size_t stack_shadow_zone_size() {
1572     assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
1573     return _stack_shadow_zone_size;
1574   }
1575   static void set_stack_shadow_zone_size(size_t s) {
1576     // The shadow area is not allocated or protected, so
1577     // it need not be page aligned.
1578     // But the stack bang currently assumes that it is a
1579     // multiple of page size. This guarantees that the bang
1580     // loop touches all pages in the shadow zone.
1581     // This can be guaranteed differently, as well.  E.g., if
1582     // the page size is a multiple of 4K, banging in 4K steps
1583     // suffices to touch all pages. (Some pages are banged
1584     // several times, though.)
1585     assert(is_aligned(s, os::vm_page_size()),
1586            "Stack bang assumes multiple of page size.");
1587     assert(_stack_shadow_zone_size == 0, "This should be called only once.");
1588     _stack_shadow_zone_size = s;
1589   }
1590 
1591   void create_stack_guard_pages();
1592   void remove_stack_guard_pages();
1593 
1594   void enable_stack_reserved_zone();
1595   void disable_stack_reserved_zone();
1596   void enable_stack_yellow_reserved_zone();
1597   void disable_stack_yellow_reserved_zone();
1598   void enable_stack_red_zone();
1599   void disable_stack_red_zone();
1600 
1601   inline bool stack_guard_zone_unused();
1602   inline bool stack_yellow_reserved_zone_disabled();
1603   inline bool stack_reserved_zone_disabled();
1604   inline bool stack_guards_enabled();
1605 
1606   address reserved_stack_activation() const { return _reserved_stack_activation; }
1607   void set_reserved_stack_activation(address addr) {
1608     assert(_reserved_stack_activation == stack_base()
1609             || _reserved_stack_activation == NULL
1610             || addr == stack_base(), "Must not be set twice");
1611     _reserved_stack_activation = addr;
1612   }
1613 
1614   // Attempt to reguard the stack after a stack overflow may have occurred.
1615   // Returns true if (a) guard pages are not needed on this thread, (b) the
1616   // pages are already guarded, or (c) the pages were successfully reguarded.
1617   // Returns false if there is not enough stack space to reguard the pages, in
1618   // which case the caller should unwind a frame and try again.  The argument
1619   // should be the caller's (approximate) sp.
1620   bool reguard_stack(address cur_sp);
1621   // Similar to the above, but checks whether the current stack pointer is out
1622   // of the guard area and reguards if possible.
1623   bool reguard_stack(void);
1624 
1625   address stack_overflow_limit() { return _stack_overflow_limit; }
1626   void set_stack_overflow_limit() {
1627     _stack_overflow_limit =
1628       stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
1629   }
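
       // Sketch of how the limit is intended to be used (illustrative; the real
       // checks are emitted into interpreter and compiled code): before pushing
       // a new frame, compare the prospective stack pointer against the limit
       // and take the stack-overflow path if it would drop below the limit.
       //
       //   address prospective_sp = ...;   // sp after the new frame (hypothetical)
       //   if (prospective_sp < thread->stack_overflow_limit()) {
       //     // too close to the guard/shadow area -> throw StackOverflowError
       //   }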
1630 
1631   // Misc. accessors/mutators
1632   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
1633   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
1634   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
1635 
1636 #ifndef PRODUCT
1637   void record_jump(address target, address instr, const char* file, int line);
1638 #endif // PRODUCT
1639 
1640   // For assembly stub generation
1641   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
1642 #ifndef PRODUCT
1643   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
1644   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
1645 #endif // PRODUCT
1646   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
1647   static ByteSize pending_jni_exception_check_fn_offset() {
1648     return byte_offset_of(JavaThread, _pending_jni_exception_check_fn);
1649   }
1650   static ByteSize last_Java_sp_offset() {
1651     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
1652   }
1653   static ByteSize last_Java_pc_offset() {
1654     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
1655   }
1656   static ByteSize frame_anchor_offset() {
1657     return byte_offset_of(JavaThread, _anchor);
1658   }
1659   static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target); }
1660   static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result); }
1661   static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
1662   static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
1663   static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
1664   static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
1665 #if INCLUDE_JVMCI
1666   static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
1667   static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
1668   static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
1669   static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
1670   static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
1671   static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
1672 #endif // INCLUDE_JVMCI
1673   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
1674   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
1675   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
1676   static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
1677   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
1678   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
1679   static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
1680   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
1681 
1682   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
1683   static ByteSize should_post_on_exceptions_flag_offset() {
1684     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
1685   }
1686 
1687   // Returns the jni environment for this thread
1688   JNIEnv* jni_environment()                      { return &_jni_environment; }
1689 
1690   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
1691     JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
1692     // Only return NULL if the thread is off the thread list; a thread that is
1693     // starting to exit should not return NULL.
1694     if (thread_from_jni_env->is_terminated()) {
1695       thread_from_jni_env->block_if_vm_exited();
1696       return NULL;
1697     } else {
1698       return thread_from_jni_env;
1699     }
1700   }
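
       // Hedged usage sketch: because _jni_environment is embedded in the
       // JavaThread at a fixed offset, a JNI entry point can recover its
       // JavaThread from the JNIEnv* it was handed. The entry point name below
       // is hypothetical.
       //
       //   extern "C" void JNICALL Hypothetical_JNI_Entry(JNIEnv* env, jobject obj) {
       //     JavaThread* thread = JavaThread::thread_from_jni_environment(env);
       //     if (thread == NULL) return;   // terminated thread / VM exiting
       //     ...
       //   }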
1701 
1702   // JNI critical regions. These can nest.
1703   bool in_critical()    { return _jni_active_critical > 0; }
1704   bool in_last_critical()  { return _jni_active_critical == 1; }
1705   void enter_critical() {
1706     assert(Thread::current() == this ||
1707            (Thread::current()->is_VM_thread() &&
1708            SafepointSynchronize::is_synchronizing()),
1709            "this must be current thread or synchronizing");
1710     _jni_active_critical++;
1711   }
1712   void exit_critical() {
1713     assert(Thread::current() == this, "this must be current thread");
1714     _jni_active_critical--;
1715     assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
1716   }
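
       // Hypothetical usage sketch: the counter brackets JNI critical regions,
       // e.g. those opened by GetPrimitiveArrayCritical, and such regions nest.
       //
       //   thread->enter_critical();    // e.g. on GetPrimitiveArrayCritical
       //   ...                          // keep short; no other JNI calls here
       //   thread->exit_critical();     // e.g. on ReleasePrimitiveArrayCritical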
1717 
1718   // Checked JNI: is the programmer required to check for exceptions; if so,
1719   // record which function name. Returning to a Java frame should implicitly clear
1720   // the pending check; this is done for Native->Java transitions (i.e. user JNI code).
1721   // VM->Java transitions are not cleared; it is expected that JNI code enclosed
1722   // within ThreadToNativeFromVM makes proper exception checks (i.e. VM internal).
1723   bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
1724   void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
1725   const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
1726   void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }
1727 
1728   // For deadlock detection
1729   int depth_first_number() { return _depth_first_number; }
1730   void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
1731 
1732  private:
1733   void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }
1734 
1735  public:
1736   MonitorChunk* monitor_chunks() const           { return _monitor_chunks; }
1737   void add_monitor_chunk(MonitorChunk* chunk);
1738   void remove_monitor_chunk(MonitorChunk* chunk);
1739   bool in_deopt_handler() const                  { return _in_deopt_handler > 0; }
1740   void inc_in_deopt_handler()                    { _in_deopt_handler++; }
1741   void dec_in_deopt_handler() {
1742     assert(_in_deopt_handler > 0, "mismatched deopt nesting");
1743     if (_in_deopt_handler > 0) { // robustness
1744       _in_deopt_handler--;
1745     }
1746   }
1747 
1748  private:
1749   void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }
1750 
1751  public:
1752 
1753   // Frame iteration; calls the function f for all frames on the stack
1754   void frames_do(void f(frame*, const RegisterMap*));
1755 
1756   // Memory operations
1757   void oops_do(OopClosure* f, CodeBlobClosure* cf);
1758 
1759   // Sweeper operations
1760   virtual void nmethods_do(CodeBlobClosure* cf);
1761 
1762   // RedefineClasses Support
1763   void metadata_do(void f(Metadata*));
1764 
1765   // Misc. operations
1766   char* name() const { return (char*)get_thread_name(); }
1767   void print_on(outputStream* st) const;
1768   void print_value();
1769   void print_thread_state_on(outputStream*) const      PRODUCT_RETURN;
1770   void print_thread_state() const                      PRODUCT_RETURN;
1771   void print_on_error(outputStream* st, char* buf, int buflen) const;
1772   void print_name_on_error(outputStream* st, char* buf, int buflen) const;
1773   void verify();
1774   const char* get_thread_name() const;
1775  private:
1776   // factor out low-level mechanics for use in both normal and error cases
1777   const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
1778  public:
1779   const char* get_threadgroup_name() const;
1780   const char* get_parent_name() const;
1781 
1782   // Accessing frames
1783   frame last_frame() {
1784     _anchor.make_walkable(this);
1785     return pd_last_frame();
1786   }
1787   javaVFrame* last_java_vframe(RegisterMap* reg_map);
1788 
1789   // Returns method at 'depth' java or native frames down the stack
1790   // Used for security checks
1791   Klass* security_get_caller_class(int depth);
1792 
1793   // Print stack trace in external format
1794   void print_stack_on(outputStream* st);
1795   void print_stack() { print_stack_on(tty); }
1796 
1797   // Print stack traces in various internal formats
1798   void trace_stack()                             PRODUCT_RETURN;
1799   void trace_stack_from(vframe* start_vf)        PRODUCT_RETURN;
1800   void trace_frames()                            PRODUCT_RETURN;
1801   void trace_oops()                              PRODUCT_RETURN;
1802 
1803   // Print an annotated view of the stack frames
1804   void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
1805   void validate_frame_layout() {
1806     print_frame_layout(0, true);
1807   }
1808 
1809   // Returns the number of stack frames on the stack
1810   int depth() const;
1811 
1812   // Function for testing deoptimization
1813   void deoptimize();
1814   void make_zombies();
1815 
1816   void deoptimized_wrt_marked_nmethods();
1817 
1818  public:
1819   // Returns the running thread as a JavaThread
1820   static inline JavaThread* current();
1821 
1822   // Returns the active Java thread.  Do not use this if you know you are calling
1823   // from a JavaThread, as it's slower than JavaThread::current.  If called from
1824   // the VMThread, it also returns the JavaThread that instigated the VMThread's
1825   // operation.  You may not want that either.
1826   static JavaThread* active();
1827 
1828   inline CompilerThread* as_CompilerThread();
1829 
1830  public:
1831   virtual void run();
1832   void thread_main_inner();
1833 
1834  private:
1835   // PRIVILEGED STACK
1836   PrivilegedElement*  _privileged_stack_top;
1837   GrowableArray<oop>* _array_for_gc;
1838  public:
1839 
1840   // Returns the privileged_stack information.
1841   PrivilegedElement* privileged_stack_top() const       { return _privileged_stack_top; }
1842   void set_privileged_stack_top(PrivilegedElement *e)   { _privileged_stack_top = e; }
1843   void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }
1844 
1845  public:
1846   // Thread local information maintained by JVMTI.
1847   void set_jvmti_thread_state(JvmtiThreadState *value)                           { _jvmti_thread_state = value; }
1848   // A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
1849   // getter returns this JavaThread's JvmtiThreadState if it has one,
1850   // which means NULL can be returned. JvmtiThreadState::state_for()
1851   // returns the specified JavaThread's JvmtiThreadState if it has one,
1852   // or it allocates a new JvmtiThreadState for the JavaThread and
1853   // returns it. JvmtiThreadState::state_for() will return NULL only if
1854   // the specified JavaThread is exiting.
1855   JvmtiThreadState *jvmti_thread_state() const                                   { return _jvmti_thread_state; }
1856   static ByteSize jvmti_thread_state_offset()                                    { return byte_offset_of(JavaThread, _jvmti_thread_state); }
1857   void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; }
1858   JvmtiGetLoadedClassesClosure* get_jvmti_get_loaded_classes_closure() const     { return _jvmti_get_loaded_classes_closure; }
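
       // Usage sketch (illustrative): code that only observes the state uses the
       // getter above and must tolerate NULL, while code that needs a state
       // object asks JvmtiThreadState::state_for() to allocate one on demand.
       //
       //   JvmtiThreadState* s1 = thread->jvmti_thread_state();         // may be NULL
       //   JvmtiThreadState* s2 = JvmtiThreadState::state_for(thread);  // NULL only if exiting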
1859 
1860   // JVMTI PopFrame support
1861   // Setting and clearing popframe_condition
1862   // All of these enumerated values are bits. popframe_pending
1863   // indicates that a PopFrame() has been requested and not yet been
1864   // completed. popframe_processing indicates that the PopFrame() is in
1865   // the process of being completed. popframe_force_deopt_reexecution_bit
1866   // indicates that special handling is required when returning to a
1867   // deoptimized caller.
1868   enum PopCondition {
1869     popframe_inactive                      = 0x00,
1870     popframe_pending_bit                   = 0x01,
1871     popframe_processing_bit                = 0x02,
1872     popframe_force_deopt_reexecution_bit   = 0x04
1873   };
1874   PopCondition popframe_condition()                   { return (PopCondition) _popframe_condition; }
1875   void set_popframe_condition(PopCondition c)         { _popframe_condition = c; }
1876   void set_popframe_condition_bit(PopCondition c)     { _popframe_condition |= c; }
1877   void clear_popframe_condition()                     { _popframe_condition = popframe_inactive; }
1878   static ByteSize popframe_condition_offset()         { return byte_offset_of(JavaThread, _popframe_condition); }
1879   bool has_pending_popframe()                         { return (popframe_condition() & popframe_pending_bit) != 0; }
1880   bool popframe_forcing_deopt_reexecution()           { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
1881   void clear_popframe_forcing_deopt_reexecution()     { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; }
1882 #ifdef CC_INTERP
1883   bool pop_frame_pending(void)                        { return ((_popframe_condition & popframe_pending_bit) != 0); }
1884   void clr_pop_frame_pending(void)                    { _popframe_condition = popframe_inactive; }
1885   bool pop_frame_in_process(void)                     { return ((_popframe_condition & popframe_processing_bit) != 0); }
1886   void set_pop_frame_in_process(void)                 { _popframe_condition |= popframe_processing_bit; }
1887   void clr_pop_frame_in_process(void)                 { _popframe_condition &= ~popframe_processing_bit; }
1888 #endif
1889 
1890   int frames_to_pop_failed_realloc() const            { return _frames_to_pop_failed_realloc; }
1891   void set_frames_to_pop_failed_realloc(int nb)       { _frames_to_pop_failed_realloc = nb; }
1892   void dec_frames_to_pop_failed_realloc()             { _frames_to_pop_failed_realloc--; }
1893 
1894  private:
1895   // Saved incoming arguments to popped frame.
1896   // Used only when popped interpreted frame returns to deoptimized frame.
1897   void*    _popframe_preserved_args;
1898   int      _popframe_preserved_args_size;
1899 
1900  public:
1901   void  popframe_preserve_args(ByteSize size_in_bytes, void* start);
1902   void* popframe_preserved_args();
1903   ByteSize popframe_preserved_args_size();
1904   WordSize popframe_preserved_args_size_in_words();
1905   void  popframe_free_preserved_args();
1906 
1907 
1908  private:
1909   JvmtiThreadState *_jvmti_thread_state;
1910   JvmtiGetLoadedClassesClosure* _jvmti_get_loaded_classes_closure;
1911 
1912   // Used by the interpreter in fullspeed mode for frame pop, method
1913   // entry, method exit and single stepping support. This field is
1914   // only set to non-zero by the VM_EnterInterpOnlyMode VM operation.
1915   // It can be set to zero asynchronously (i.e., without a VM operation
1916   // or a lock) so we have to be very careful.
1917   int               _interp_only_mode;
1918 
1919  public:
1920   // used by the interpreter for fullspeed debugging support (see above)
1921   static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
1922   bool is_interp_only_mode()                { return (_interp_only_mode != 0); }
1923   int get_interp_only_mode()                { return _interp_only_mode; }
1924   void increment_interp_only_mode()         { ++_interp_only_mode; }
1925   void decrement_interp_only_mode()         { --_interp_only_mode; }
1926 
1927   // Support for a cached flag that indicates whether exceptions need to be posted for this thread.
1928   // If this is false, we can avoid deoptimizing when events are thrown.
1929   // This gets set to reflect whether JvmtiExport::post_exception_throw() would actually do anything.
1930  private:
1931   int    _should_post_on_exceptions_flag;
1932 
1933  public:
1934   int   should_post_on_exceptions_flag()  { return _should_post_on_exceptions_flag; }
1935   void  set_should_post_on_exceptions_flag(int val)  { _should_post_on_exceptions_flag = val; }
1936 
1937  private:
1938   ThreadStatistics *_thread_stat;
1939 
1940  public:
1941   ThreadStatistics* get_thread_stat() const    { return _thread_stat; }
1942 
1943   // Return a blocker object for which this thread is blocked parking.
1944   oop current_park_blocker();
1945 
1946  private:
1947   static size_t _stack_size_at_create;
1948 
1949  public:
1950   static inline size_t stack_size_at_create(void) {
1951     return _stack_size_at_create;
1952   }
1953   static inline void set_stack_size_at_create(size_t value) {
1954     _stack_size_at_create = value;
1955   }
1956 
1957   // Machine dependent stuff
1958 #include OS_CPU_HEADER(thread)
1959 
1960  public:
1961   void set_blocked_on_compilation(bool value) {
1962     _blocked_on_compilation = value;
1963   }
1964 
1965   bool blocked_on_compilation() {
1966     return _blocked_on_compilation;
1967   }
1968  protected:
1969   bool         _blocked_on_compilation;
1970 
1971 
1972   // JSR166 per-thread parker
1973  private:
1974   Parker*    _parker;
1975  public:
1976   Parker*     parker() { return _parker; }
1977 
1978   // Biased locking support
1979  private:
1980   GrowableArray<MonitorInfo*>* _cached_monitor_info;
1981  public:
1982   GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
1983   void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }
1984 
1985   // clearing/querying jni attach status
1986   bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
1987   bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
1988   inline void set_done_attaching_via_jni();
1989 };
1990 
1991 // Inline implementation of JavaThread::current
1992 inline JavaThread* JavaThread::current() {
1993   Thread* thread = Thread::current();
1994   assert(thread->is_Java_thread(), "just checking");
1995   return (JavaThread*)thread;
1996 }
1997 
1998 inline CompilerThread* JavaThread::as_CompilerThread() {
1999   assert(is_Compiler_thread(), "just checking");
2000   return (CompilerThread*)this;
2001 }
2002 
2003 // Dedicated thread to sweep the code cache
2004 class CodeCacheSweeperThread : public JavaThread {
2005   CompiledMethod*       _scanned_compiled_method; // nmethod being scanned by the sweeper
2006  public:
2007   CodeCacheSweeperThread();
2008   // Track the nmethod currently being scanned by the sweeper
2009   void set_scanned_compiled_method(CompiledMethod* cm) {
2010     assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
2011     _scanned_compiled_method = cm;
2012   }
2013 
2014   // Hide sweeper thread from external view.
2015   bool is_hidden_from_external_view() const { return true; }
2016 
2017   bool is_Code_cache_sweeper_thread() const { return true; }
2018 
2019   // Prevent GC from unloading _scanned_compiled_method
2020   void oops_do(OopClosure* f, CodeBlobClosure* cf);
2021   void nmethods_do(CodeBlobClosure* cf);
2022 };
2023 
2024 // A thread used for Compilation.
2025 class CompilerThread : public JavaThread {
2026   friend class VMStructs;
2027  private:
2028   CompilerCounters* _counters;
2029 
2030   ciEnv*                _env;
2031   CompileLog*           _log;
2032   CompileTask* volatile _task;  // print_threads_compiling can read this concurrently.
2033   CompileQueue*         _queue;
2034   BufferBlob*           _buffer_blob;
2035 
2036   AbstractCompiler*     _compiler;
2037 
2038  public:
2039 
2040   static CompilerThread* current();
2041 
2042   CompilerThread(CompileQueue* queue, CompilerCounters* counters);
2043 
2044   bool is_Compiler_thread() const                { return true; }
2045 
2046   virtual bool can_call_java() const;
2047 
2048   // Hide native compiler threads from external view.
2049   bool is_hidden_from_external_view() const      { return !can_call_java(); }
2050 
2051   void set_compiler(AbstractCompiler* c)         { _compiler = c; }
2052   AbstractCompiler* compiler() const             { return _compiler; }
2053 
2054   CompileQueue* queue()        const             { return _queue; }
2055   CompilerCounters* counters() const             { return _counters; }
2056 
2057   // Get/set the thread's compilation environment.
2058   ciEnv*        env()                            { return _env; }
2059   void          set_env(ciEnv* env)              { _env = env; }
2060 
2061   BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
2062   void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; }
2063 
2064   // Get/set the thread's logging information
2065   CompileLog*   log()                            { return _log; }
2066   void          init_log(CompileLog* log) {
2067     // Set once, for good.
2068     assert(_log == NULL, "set only once");
2069     _log = log;
2070   }
2071 
2072 #ifndef PRODUCT
2073  private:
2074   IdealGraphPrinter *_ideal_graph_printer;
2075  public:
2076   IdealGraphPrinter *ideal_graph_printer()           { return _ideal_graph_printer; }
2077   void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
2078 #endif
2079 
2080   // Get/set the thread's current task
2081   CompileTask* task()                      { return _task; }
2082   void         set_task(CompileTask* task) { _task = task; }
2083 };
2084 
2085 inline CompilerThread* CompilerThread::current() {
2086   return JavaThread::current()->as_CompilerThread();
2087 }
2088 
2089 // The active thread queue. It also keeps track of the currently used
2090 // thread priorities.
2091 class Threads: AllStatic {
2092   friend class VMStructs;
2093  private:
2094   static JavaThread* _thread_list;
2095   static int         _number_of_threads;
2096   static int         _number_of_non_daemon_threads;
2097   static int         _return_code;
2098   static int         _thread_claim_parity;
2099 #ifdef ASSERT
2100   static bool        _vm_complete;
2101 #endif
2102 
2103   static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
2104   static void initialize_jsr292_core_classes(TRAPS);
2105 
2106  public:
2107   // Thread management
2108   // force_daemon is a concession to JNI, where we may need to add a
2109   // thread to the thread list before allocating its thread object
2110   static void add(JavaThread* p, bool force_daemon = false);
2111   static void remove(JavaThread* p);
2112   static void non_java_threads_do(ThreadClosure* tc);
2113   static void threads_do(ThreadClosure* tc);
2114   static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc);
2115 
2116   // Initializes the vm and creates the vm thread
2117   static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
2118   static void convert_vm_init_libraries_to_agents();
2119   static void create_vm_init_libraries();
2120   static void create_vm_init_agents();
2121   static void shutdown_vm_agents();
2122   static bool destroy_vm();
2123   // Supported VM versions via JNI
2124   // Includes JNI_VERSION_1_1
2125   static jboolean is_supported_jni_version_including_1_1(jint version);
2126   // Does not include JNI_VERSION_1_1
2127   static jboolean is_supported_jni_version(jint version);
2128 
2129   // The "thread claim parity" provides a way for threads to be claimed
2130   // by parallel worker tasks.
2131   //
2132   // Each thread contains a "parity" field. A task will claim the
2133   // thread only if its parity field is the same as the global parity,
2134   // which is updated by calling change_thread_claim_parity().
2135   //
2136   // For this to work, change_thread_claim_parity() needs to be called
2137   // exactly once in sequential code before starting parallel tasks
2138   // that should claim threads.
2139   //
2140   // New threads get their parity set to 0, and change_thread_claim_parity()
2141   // never sets the global parity to 0.
2142   static int thread_claim_parity() { return _thread_claim_parity; }
2143   static void change_thread_claim_parity();
2144   static void assert_all_threads_claimed() NOT_DEBUG_RETURN;
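
       // Illustrative claiming protocol (hedged sketch; the per-thread claim
       // state lives in Thread): the sequential phase flips the global parity
       // once, then each parallel worker claims a thread by atomically bringing
       // that thread's parity field up to the new global value; only the worker
       // whose update succeeds processes the thread.
       //
       //   Threads::change_thread_claim_parity();   // sequential code, exactly once
       //   // ... start parallel tasks; each task processes a candidate thread t
       //   // only if it wins the race to set t's parity to
       //   // Threads::thread_claim_parity().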
2145 
2146   // Apply "f->do_oop" to all root oops in all threads.
2147   // This version may only be called by sequential code.
2148   static void oops_do(OopClosure* f, CodeBlobClosure* cf);
2149   // This version may be called by sequential or parallel code.
2150   static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf);
2151   // This creates a list of GCTasks, one per thread.
2152   static void create_thread_roots_tasks(GCTaskQueue* q);
2153   // This creates a list of GCTasks, one per thread, for marking objects.
2154   static void create_thread_roots_marking_tasks(GCTaskQueue* q);
2155 
2156   // Apply "f->do_oop" to roots in all threads that
2157   // are part of compiled frames
2158   static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf);
2159 
2160   static void convert_hcode_pointers();
2161   static void restore_hcode_pointers();
2162 
2163   // Sweeper
2164   static void nmethods_do(CodeBlobClosure* cf);
2165 
2166   // RedefineClasses support
2167   static void metadata_do(void f(Metadata*));
2168   static void metadata_handles_do(void f(Metadata*));
2169 
2170 #ifdef ASSERT
2171   static bool is_vm_complete() { return _vm_complete; }
2172 #endif
2173 
2174   // Verification
2175   static void verify();
2176   static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks);
2177   static void print(bool print_stacks, bool internal_format) {
2178     // this function is only used by debug.cpp
2179     print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */);
2180   }
2181   static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);
2182   static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
2183                              int buflen, bool* found_current);
2184   static void print_threads_compiling(outputStream* st, char* buf, int buflen);
2185 
2186   // Get Java threads that are waiting to enter a monitor.
2187   static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list,
2188                                                          int count, address monitor);
2189 
2190   // Get owning Java thread from the monitor's owner field.
2191   static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list,
2192                                                       address owner);
2193 
2194   // Number of threads on the active threads list
2195   static int number_of_threads()                 { return _number_of_threads; }
2196   // Number of non-daemon threads on the active threads list
2197   static int number_of_non_daemon_threads()      { return _number_of_non_daemon_threads; }
2198 
2199   // Deoptimizes all frames tied to marked nmethods
2200   static void deoptimized_wrt_marked_nmethods();
2201 };
2202 
2203 
2204 // Thread iterator
2205 class ThreadClosure: public StackObj {
2206  public:
2207   virtual void do_thread(Thread* thread) = 0;
2208 };
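
     // Example (illustrative sketch, not part of this header): a closure that
     // counts threads, applied via Threads::threads_do(). threads_do() is
     // typically invoked by VM code holding the Threads_lock or at a safepoint.
     //
     //   class CountingThreadClosure : public ThreadClosure {
     //     int _count;
     //    public:
     //     CountingThreadClosure() : _count(0) {}
     //     virtual void do_thread(Thread* thread) { _count++; }
     //     int count() const { return _count; }
     //   };
     //
     //   CountingThreadClosure cl;
     //   Threads::threads_do(&cl);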
2209 
2210 class SignalHandlerMark: public StackObj {
2211  private:
2212   Thread* _thread;
2213  public:
2214   SignalHandlerMark(Thread* t) {
2215     _thread = t;
2216     if (_thread) _thread->enter_signal_handler();
2217   }
2218   ~SignalHandlerMark() {
2219     if (_thread) _thread->leave_signal_handler();
2220     _thread = NULL;
2221   }
2222 };
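
     // Usage sketch (illustrative; the thread-lookup helper shown reflects how
     // handlers commonly obtain the current thread and should be treated as an
     // assumption): a signal handler brackets its body with a SignalHandlerMark
     // so that the thread's signal-handler nesting is tracked.
     //
     //   static void hypothetical_handler(int sig, siginfo_t* info, void* uc) {
     //     Thread* t = Thread::current_or_null_safe();  // may be NULL if unattached
     //     SignalHandlerMark shm(t);                     // tolerates a NULL thread
     //     ...
     //   }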
2223 
2224 
2225 #endif // SHARE_VM_RUNTIME_THREAD_HPP