84 template <class T, MEMFLAGS F> class ChunkedList;
85 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
86
87 DEBUG_ONLY(class ResourceMark;)
88
89 class WorkerThread;
90
91 // Class hierarchy
92 // - Thread
93 // - NamedThread
94 // - VMThread
95 // - ConcurrentGCThread
96 // - WorkerThread
97 // - GangWorker
98 // - GCTaskThread
99 // - JavaThread
100 // - WatcherThread
101
102 class Thread: public ThreadShadow {
103 friend class VMStructs;
104 private:
105 // Exception handling
106 // (Note: _pending_exception and friends are in ThreadShadow)
107 //oop _pending_exception; // pending exception for current thread
108 // const char* _exception_file; // file information for exception (debugging only)
109 // int _exception_line; // line information for exception (debugging only)
110 protected:
111 // Support for forcing alignment of thread objects for biased locking
112 void* _real_malloc_address;
113 public:
// Thread objects are allocated through Thread::allocate() so the
// allocation can be aligned (see _real_malloc_address above).  The
// second argument is allocate()'s throw_excpt flag: true for the
// throwing form, false for the std::nothrow form.
114 void* operator new(size_t size) throw() { return allocate(size, true); }
115 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
116 return allocate(size, false); }
// Frees via the matching custom deallocator (defined out of line).
117 void operator delete(void* p);
118
119 protected:
120 static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
121 private:
122
123 // ***************************************************************
220 _suspendible_thread = false;
221 }
222
// True while this thread is registered as suspendible
// (_suspendible_thread is set/cleared in the surrounding fragment).
223 bool is_suspendible_thread() { return _suspendible_thread; }
224 #endif
225
226 private:
227 // Active_handles points to a block of handles
228 JNIHandleBlock* _active_handles;
229
230 // One-element thread local free list
231 JNIHandleBlock* _free_handle_block;
232
233 // Point to the last handle mark
234 HandleMark* _last_handle_mark;
235
236 // The parity of the last strong_roots iteration in which this thread was
237 // claimed as a task.
238 jint _oops_do_parity;
239
240 public:
// Accessors for the most recent HandleMark on this thread
// (see the _last_handle_mark field above).
241 void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
242 HandleMark* last_handle_mark() const { return _last_handle_mark; }
243 private:
244
245 // debug support for checking if code does allow safepoints or not
246 // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
247 // mutex, or blocking on an object synchronizer (Java locking).
248 // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
249 // If !allow_allocation(), then an assertion failure will happen during allocation
250 // (Hence, !allow_safepoint() => !allow_allocation()).
251 //
252 // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
253 //
254 NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen
255 debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
256
257 // Used by SkipGCALot class.
258 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
259
// Debug-only accessors (this fragment is closed by the #endif below).
// Allocation is permitted only while the disallow count is zero.
571 bool allow_allocation() { return _allow_allocation_count == 0; }
572 ResourceMark* current_resource_mark() { return _current_resource_mark; }
573 void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
574 #endif
575
576 void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
577
578 private:
579 volatile int _jvmti_env_iteration_count;
580
581 public:
// Nesting counter for JVMTI environment iteration: incremented on
// entry, decremented on exit; the thread is "inside" while > 0.
582 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
583 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
584 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
585
586 // Code generation
// Byte offsets of Thread fields, for use by generated code.
587 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
588 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
589 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
590
591 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
592 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
593
// Generates an offset accessor for each sub-field of the thread-local
// allocation buffer: offset of _tlab within Thread plus the field's
// offset within ThreadLocalAllocBuffer.
594 #define TLAB_FIELD_OFFSET(name) \
595 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
596
597 TLAB_FIELD_OFFSET(start)
598 TLAB_FIELD_OFFSET(end)
599 TLAB_FIELD_OFFSET(top)
600 TLAB_FIELD_OFFSET(pf_top)
601 TLAB_FIELD_OFFSET(size) // desired_size
602 TLAB_FIELD_OFFSET(refill_waste_limit)
603 TLAB_FIELD_OFFSET(number_of_refills)
604 TLAB_FIELD_OFFSET(fast_refill_waste)
605 TLAB_FIELD_OFFSET(slow_allocations)
606
607 #undef TLAB_FIELD_OFFSET
608
609 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
610
919 // This is set to popframe_pending to signal that top Java frame should be popped immediately
920 int _popframe_condition;
921
922 // If reallocation of scalar replaced objects fails, we throw OOM
923 // and during exception propagation, pop the top
924 // _frames_to_pop_failed_realloc frames, the ones that reference
925 // failed reallocations.
926 int _frames_to_pop_failed_realloc;
927
928 #ifndef PRODUCT
929 int _jmp_ring_index;
930 struct {
931 // We use intptr_t instead of address so debugger doesn't try and display strings
932 intptr_t _target;
933 intptr_t _instruction;
934 const char* _file;
935 int _line;
936 } _jmp_ring[jump_ring_buffer_size];
937 #endif // PRODUCT
938
939 #if INCLUDE_ALL_GCS
940 // Support for G1 barriers
941
942 ObjPtrQueue _satb_mark_queue; // Thread-local log for SATB barrier.
943 // Set of all such queues.
944 static SATBMarkQueueSet _satb_mark_queue_set;
945
946 DirtyCardQueue _dirty_card_queue; // Thread-local log for dirty cards.
947 // Set of all such queues.
948 static DirtyCardQueueSet _dirty_card_queue_set;
949
950 void flush_barrier_queues();
951 #endif // INCLUDE_ALL_GCS
952
953 friend class VMThread;
954 friend class ThreadWaitTransition;
955 friend class VM_Exit;
956
957 void initialize(); // Initialized the instance variables
958
1850 public:
1851 IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
1852 void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
1853 #endif
1854
1855 // Get/set the thread's current task
1856 CompileTask* task() { return _task; }
1857 void set_task(CompileTask* task) { _task = task; }
1858 };
1859
1860 inline CompilerThread* CompilerThread::current() {
1861 return JavaThread::current()->as_CompilerThread();
1862 }
1863
1864 // The active thread queue. It also keeps track of the current used
1865 // thread priorities.
1866 class Threads: AllStatic {
1867 friend class VMStructs;
1868 private:
1869 static JavaThread* _thread_list;
1870 static int _number_of_threads;
1871 static int _number_of_non_daemon_threads;
1872 static int _return_code;
1873 static int _thread_claim_parity;
1874 #ifdef ASSERT
1875 static bool _vm_complete;
1876 #endif
1877
1878 static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
1879 static void initialize_jsr292_core_classes(TRAPS);
1880 public:
1881 // Thread management
1882 // force_daemon is a concession to JNI, where we may need to add a
1883 // thread to the thread list before allocating its thread object
1884 static void add(JavaThread* p, bool force_daemon = false);
1885 static void remove(JavaThread* p);
1886 static bool includes(JavaThread* p);
// Head of the VM's list of JavaThreads (see _thread_list above).
1887 static JavaThread* first() { return _thread_list; }
1888 static void threads_do(ThreadClosure* tc);
1889
1890 // Initializes the vm and creates the vm thread
1891 static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
1892 static void convert_vm_init_libraries_to_agents();
1893 static void create_vm_init_libraries();
1894 static void create_vm_init_agents();
1895 static void shutdown_vm_agents();
1896 static bool destroy_vm();
1897 // Supported VM versions via JNI
1898 // Includes JNI_VERSION_1_1
1899 static jboolean is_supported_jni_version_including_1_1(jint version);
1900 // Does not include JNI_VERSION_1_1
1901 static jboolean is_supported_jni_version(jint version);
1902
1903 // The "thread claim parity" provides a way for threads to be claimed
1904 // by parallel worker tasks.
1905 //
1906 // Each thread contains a a "parity" field. A task will claim the
1907 // thread only if its parity field is the same as the global parity,
1908 // which is updated by calling change_thread_claim_parity().
1909 //
|
84 template <class T, MEMFLAGS F> class ChunkedList;
85 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
86
87 DEBUG_ONLY(class ResourceMark;)
88
89 class WorkerThread;
90
91 // Class hierarchy
92 // - Thread
93 // - NamedThread
94 // - VMThread
95 // - ConcurrentGCThread
96 // - WorkerThread
97 // - GangWorker
98 // - GCTaskThread
99 // - JavaThread
100 // - WatcherThread
101
102 class Thread: public ThreadShadow {
103 friend class VMStructs;
104 friend class Threads;
105 friend class ScanHazardPointerThreadClosure;
106 friend class ScanHazardPointerThreadsClosure;
107 private:
108 // Exception handling
109 // (Note: _pending_exception and friends are in ThreadShadow)
110 //oop _pending_exception; // pending exception for current thread
111 // const char* _exception_file; // file information for exception (debugging only)
112 // int _exception_line; // line information for exception (debugging only)
113 protected:
114 // Support for forcing alignment of thread objects for biased locking
115 void* _real_malloc_address;
116 public:
// Thread objects are allocated through Thread::allocate() so the
// allocation can be aligned (see _real_malloc_address above).  The
// second argument is allocate()'s throw_excpt flag: true for the
// throwing form, false for the std::nothrow form.
117 void* operator new(size_t size) throw() { return allocate(size, true); }
118 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
119 return allocate(size, false); }
// Frees via the matching custom deallocator (defined out of line).
120 void operator delete(void* p);
121
122 protected:
123 static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
124 private:
125
126 // ***************************************************************
223 _suspendible_thread = false;
224 }
225
// True while this thread is registered as suspendible
// (_suspendible_thread is set/cleared in the surrounding fragment).
226 bool is_suspendible_thread() { return _suspendible_thread; }
227 #endif
228
229 private:
230 // Active_handles points to a block of handles
231 JNIHandleBlock* _active_handles;
232
233 // One-element thread local free list
234 JNIHandleBlock* _free_handle_block;
235
236 // Point to the last handle mark
237 HandleMark* _last_handle_mark;
238
239 // The parity of the last strong_roots iteration in which this thread was
240 // claimed as a task.
241 jint _oops_do_parity;
242
243 JavaThread **volatile _java_threads_do_hp;
244
245 public:
// Accessors for the most recent HandleMark on this thread
// (see the _last_handle_mark field above).
246 void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
247 HandleMark* last_handle_mark() const { return _last_handle_mark; }
248 private:
249
250 // debug support for checking if code does allow safepoints or not
251 // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
252 // mutex, or blocking on an object synchronizer (Java locking).
253 // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
254 // If !allow_allocation(), then an assertion failure will happen during allocation
255 // (Hence, !allow_safepoint() => !allow_allocation()).
256 //
257 // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
258 //
259 NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen
260 debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
261
262 // Used by SkipGCALot class.
263 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
264
// Debug-only accessors (this fragment is closed by the #endif below).
// Allocation is permitted only while the disallow count is zero.
576 bool allow_allocation() { return _allow_allocation_count == 0; }
577 ResourceMark* current_resource_mark() { return _current_resource_mark; }
578 void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
579 #endif
580
581 void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
582
583 private:
584 volatile int _jvmti_env_iteration_count;
585
586 public:
// Nesting counter for JVMTI environment iteration: incremented on
// entry, decremented on exit; the thread is "inside" while > 0.
587 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
588 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
589 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
590
591 // Code generation
592 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
593 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
594 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
595
596 static ByteSize yieldpoint_offset() { return byte_offset_of(Thread, _yieldpoint_poll); }
597
598 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
599 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
600
601 #define TLAB_FIELD_OFFSET(name) \
602 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
603
604 TLAB_FIELD_OFFSET(start)
605 TLAB_FIELD_OFFSET(end)
606 TLAB_FIELD_OFFSET(top)
607 TLAB_FIELD_OFFSET(pf_top)
608 TLAB_FIELD_OFFSET(size) // desired_size
609 TLAB_FIELD_OFFSET(refill_waste_limit)
610 TLAB_FIELD_OFFSET(number_of_refills)
611 TLAB_FIELD_OFFSET(fast_refill_waste)
612 TLAB_FIELD_OFFSET(slow_allocations)
613
614 #undef TLAB_FIELD_OFFSET
615
616 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
617
926 // This is set to popframe_pending to signal that top Java frame should be popped immediately
927 int _popframe_condition;
928
929 // If reallocation of scalar replaced objects fails, we throw OOM
930 // and during exception propagation, pop the top
931 // _frames_to_pop_failed_realloc frames, the ones that reference
932 // failed reallocations.
933 int _frames_to_pop_failed_realloc;
934
935 #ifndef PRODUCT
936 int _jmp_ring_index;
937 struct {
938 // We use intptr_t instead of address so debugger doesn't try and display strings
939 intptr_t _target;
940 intptr_t _instruction;
941 const char* _file;
942 int _line;
943 } _jmp_ring[jump_ring_buffer_size];
944 #endif // PRODUCT
945
946 private:
947 volatile int _serialized_memory_version;
948 volatile bool _force_yield;
949
950 public:
// Snapshot of the serialized-memory version observed by this thread
// (backed by the volatile _serialized_memory_version field above);
// update_serialized_memory_version() refreshes it (defined out of line).
951 int serialized_memory_version() { return _serialized_memory_version; }
952 void update_serialized_memory_version();
953
// Requests that this thread yield; only sets the flag — the consumer
// of _force_yield is not visible in this fragment.
954 void set_force_yield() { _force_yield = true; }
955
// Online-state queries from the VM's and the OS's point of view
// (defined out of line).
956 bool is_online_vm();
957 bool is_online_os();
958
959 #if INCLUDE_ALL_GCS
960 // Support for G1 barriers
961
962 ObjPtrQueue _satb_mark_queue; // Thread-local log for SATB barrier.
963 // Set of all such queues.
964 static SATBMarkQueueSet _satb_mark_queue_set;
965
966 DirtyCardQueue _dirty_card_queue; // Thread-local log for dirty cards.
967 // Set of all such queues.
968 static DirtyCardQueueSet _dirty_card_queue_set;
969
970 void flush_barrier_queues();
971 #endif // INCLUDE_ALL_GCS
972
973 friend class VMThread;
974 friend class ThreadWaitTransition;
975 friend class VM_Exit;
976
977 void initialize(); // Initialized the instance variables
978
1870 public:
1871 IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
1872 void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
1873 #endif
1874
1875 // Get/set the thread's current task
1876 CompileTask* task() { return _task; }
1877 void set_task(CompileTask* task) { _task = task; }
1878 };
1879
1880 inline CompilerThread* CompilerThread::current() {
1881 return JavaThread::current()->as_CompilerThread();
1882 }
1883
1884 // The active thread queue. It also keeps track of the current used
1885 // thread priorities.
1886 class Threads: AllStatic {
1887 friend class VMStructs;
1888 private:
1889 static JavaThread* _thread_list;
1890 static JavaThread* _thread_smr_list;
1891 static JavaThread** _thread_smr_list_list;
1892 static int _number_of_threads;
1893 static int _number_of_non_daemon_threads;
1894 static int _return_code;
1895 static int _thread_claim_parity;
1896 #ifdef ASSERT
1897 static bool _vm_complete;
1898 #endif
1899
1900 static JavaThread **volatile _fast_java_thread_list;
1901
1902 static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
1903 static void initialize_jsr292_core_classes(TRAPS);
1904 static JavaThread *smr_free_work(JavaThread *thread);
1905 static void smr_free_list(JavaThread **threads);
1906 public:
1907 // Thread management
1908 // force_daemon is a concession to JNI, where we may need to add a
1909 // thread to the thread list before allocating its thread object
1910 static void add(JavaThread* p, bool force_daemon = false);
1911 static void remove(JavaThread* p);
1912 static bool includes(JavaThread* p);
// Head of the VM's list of JavaThreads (see _thread_list above).
1913 static JavaThread* first() { return _thread_list; }
1914 static void threads_do(ThreadClosure* tc);
1915
1916 static void java_threads_do_fast(ThreadClosure *tc, Thread *self);
1917 static void smr_free(JavaThread *thread, bool have_lock);
1918
1919 // Initializes the vm and creates the vm thread
1920 static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
1921 static void convert_vm_init_libraries_to_agents();
1922 static void create_vm_init_libraries();
1923 static void create_vm_init_agents();
1924 static void shutdown_vm_agents();
1925 static bool destroy_vm();
1926 // Supported VM versions via JNI
1927 // Includes JNI_VERSION_1_1
1928 static jboolean is_supported_jni_version_including_1_1(jint version);
1929 // Does not include JNI_VERSION_1_1
1930 static jboolean is_supported_jni_version(jint version);
1931
1932 // The "thread claim parity" provides a way for threads to be claimed
1933 // by parallel worker tasks.
1934 //
1935 // Each thread contains a a "parity" field. A task will claim the
1936 // thread only if its parity field is the same as the global parity,
1937 // which is updated by calling change_thread_claim_parity().
1938 //
|