23 */
24
25 #ifndef SHARE_VM_RUNTIME_THREAD_HPP
26 #define SHARE_VM_RUNTIME_THREAD_HPP
27
28 #include "jni.h"
29 #include "gc/shared/threadLocalAllocBuffer.hpp"
30 #include "memory/allocation.hpp"
31 #include "oops/oop.hpp"
32 #include "prims/jvmtiExport.hpp"
33 #include "runtime/frame.hpp"
34 #include "runtime/handshake.hpp"
35 #include "runtime/javaFrameAnchor.hpp"
36 #include "runtime/jniHandles.hpp"
37 #include "runtime/mutexLocker.hpp"
38 #include "runtime/os.hpp"
39 #include "runtime/osThread.hpp"
40 #include "runtime/park.hpp"
41 #include "runtime/safepoint.hpp"
42 #include "runtime/stubRoutines.hpp"
43 #include "runtime/threadLocalStorage.hpp"
44 #include "runtime/thread_ext.hpp"
45 #include "runtime/unhandledOops.hpp"
46 #include "trace/traceBackend.hpp"
47 #include "trace/traceMacros.hpp"
48 #include "utilities/align.hpp"
49 #include "utilities/exceptions.hpp"
50 #include "utilities/macros.hpp"
51 #if INCLUDE_ALL_GCS
52 #include "gc/g1/dirtyCardQueue.hpp"
53 #include "gc/g1/satbMarkQueue.hpp"
54 #endif // INCLUDE_ALL_GCS
55 #ifdef ZERO
56 # include "stack_zero.hpp"
57 #endif
58
59 class ThreadSafepointState;
60 class ThreadsList;
61 class ThreadsSMRSupport;
62 class NestedThreadsList;
306 // (Hence, !allow_safepoint() => !allow_allocation()).
307 //
308 // The two classes NoSafepointVerifier and NoAllocVerifier are used to set these counters.
309 //
310 NOT_PRODUCT(int _allow_safepoint_count;) // If 0, the thread allows a safepoint to happen
311 debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
312
313 // Used by SkipGCALot class.
314 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
315
316 friend class NoAllocVerifier;
317 friend class NoSafepointVerifier;
318 friend class PauseNoSafepointVerifier;
319 friend class GCLocker;
320
321 volatile void* _polling_page; // Thread local polling page
322
323 ThreadLocalAllocBuffer _tlab; // Thread-local eden
324 jlong _allocated_bytes; // Cumulative number of bytes allocated on
325 // the Java heap
326
327 mutable TRACE_DATA _trace_data; // Thread-local data for tracing; mutable so const accessors can expose it
328
329 ThreadExt _ext;
330
331 int _vm_operation_started_count; // VM_Operation support
332 int _vm_operation_completed_count; // VM_Operation support
333
334 ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
335 // is waiting to lock
336 bool _current_pending_monitor_is_from_java; // locking is from Java code
337
338 // ObjectMonitor on which this thread called Object.wait()
339 ObjectMonitor* _current_waiting_monitor;
340
341 // Private thread-local objectmonitor list - a simple cache organized as a SLL.
342 public:
343 ObjectMonitor* omFreeList;
344 int omFreeCount; // length of omFreeList
345 int omFreeProvision; // reload chunk size
487 // Internal handle support
488 HandleArea* handle_area() const { return _handle_area; }
489 void set_handle_area(HandleArea* area) { _handle_area = area; }
490
491 GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; }
492 void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
493
494 // Thread-Local Allocation Buffer (TLAB) support
495 ThreadLocalAllocBuffer& tlab() { return _tlab; }
496 void initialize_tlab() {
497 if (UseTLAB) {
498 tlab().initialize();
499 }
500 }
501
502 jlong allocated_bytes() { return _allocated_bytes; }
503 void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
504 void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
505 inline jlong cooked_allocated_bytes(); // declaration only — presumably defined in thread.inline.hpp; TODO confirm
506
507 TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET;
508 TRACE_DATA* trace_data() const { return &_trace_data; } // _trace_data is mutable, so a writable pointer can be returned from a const method
509 bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }
510
511 const ThreadExt& ext() const { return _ext; }
512 ThreadExt& ext() { return _ext; }
513
514 // VM operation support
515 int vm_operation_ticket() { return ++_vm_operation_started_count; } // pre-increment: first ticket issued is 1
516 int vm_operation_completed_count() { return _vm_operation_completed_count; }
517 void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }
518
519 // For tracking the heavyweight monitor the thread is pending on.
520 ObjectMonitor* current_pending_monitor() {
521 return _current_pending_monitor;
522 }
523 void set_current_pending_monitor(ObjectMonitor* monitor) {
524 _current_pending_monitor = monitor;
525 }
526 void set_current_pending_monitor_is_from_java(bool from_java) {
661
662 public:
663 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; } // iteration count nests; see is_inside_jvmti_env_iteration
664 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
665 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
666
667 // Code generation support: static field offsets for use by generated code.
668 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
669 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
670 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
671
672 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
673 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
674
675 static ByteSize polling_page_offset() { return byte_offset_of(Thread, _polling_page); }
676
677 #define TLAB_FIELD_OFFSET(name) \
678 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
679
680 TLAB_FIELD_OFFSET(start)
681 TLAB_FIELD_OFFSET(end)
682 TLAB_FIELD_OFFSET(top)
683 TLAB_FIELD_OFFSET(pf_top)
684 TLAB_FIELD_OFFSET(size) // desired_size
685 TLAB_FIELD_OFFSET(refill_waste_limit)
686 TLAB_FIELD_OFFSET(number_of_refills)
687 TLAB_FIELD_OFFSET(fast_refill_waste)
688 TLAB_FIELD_OFFSET(slow_allocations)
689
690 #undef TLAB_FIELD_OFFSET
691
692 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
693
694 public:
695 volatile intptr_t _Stalled; // NOTE(review): purpose not evident in this chunk — confirm against monitor/mux code
696 volatile int _TypeTag; // NOTE(review): purpose not evident in this chunk — confirm before relying on it
697 ParkEvent * _ParkEvent; // for synchronized()
698 ParkEvent * _SleepEvent; // for Thread.sleep
699 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
700 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
701 int NativeSyncRecursion; // diagnostic
|
23 */
24
25 #ifndef SHARE_VM_RUNTIME_THREAD_HPP
26 #define SHARE_VM_RUNTIME_THREAD_HPP
27
28 #include "jni.h"
29 #include "gc/shared/threadLocalAllocBuffer.hpp"
30 #include "memory/allocation.hpp"
31 #include "oops/oop.hpp"
32 #include "prims/jvmtiExport.hpp"
33 #include "runtime/frame.hpp"
34 #include "runtime/handshake.hpp"
35 #include "runtime/javaFrameAnchor.hpp"
36 #include "runtime/jniHandles.hpp"
37 #include "runtime/mutexLocker.hpp"
38 #include "runtime/os.hpp"
39 #include "runtime/osThread.hpp"
40 #include "runtime/park.hpp"
41 #include "runtime/safepoint.hpp"
42 #include "runtime/stubRoutines.hpp"
43 #include "runtime/threadHeapSampler.hpp"
44 #include "runtime/threadLocalStorage.hpp"
45 #include "runtime/thread_ext.hpp"
46 #include "runtime/unhandledOops.hpp"
47 #include "trace/traceBackend.hpp"
48 #include "trace/traceMacros.hpp"
49 #include "utilities/align.hpp"
50 #include "utilities/exceptions.hpp"
51 #include "utilities/macros.hpp"
52 #if INCLUDE_ALL_GCS
53 #include "gc/g1/dirtyCardQueue.hpp"
54 #include "gc/g1/satbMarkQueue.hpp"
55 #endif // INCLUDE_ALL_GCS
56 #ifdef ZERO
57 # include "stack_zero.hpp"
58 #endif
59
60 class ThreadSafepointState;
61 class ThreadsList;
62 class ThreadsSMRSupport;
63 class NestedThreadsList;
307 // (Hence, !allow_safepoint() => !allow_allocation()).
308 //
309 // The two classes NoSafepointVerifier and NoAllocVerifier are used to set these counters.
310 //
311 NOT_PRODUCT(int _allow_safepoint_count;) // If 0, the thread allows a safepoint to happen
312 debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
313
314 // Used by SkipGCALot class.
315 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
316
317 friend class NoAllocVerifier;
318 friend class NoSafepointVerifier;
319 friend class PauseNoSafepointVerifier;
320 friend class GCLocker;
321
322 volatile void* _polling_page; // Thread local polling page
323
324 ThreadLocalAllocBuffer _tlab; // Thread-local eden
325 jlong _allocated_bytes; // Cumulative number of bytes allocated on
326 // the Java heap
327 ThreadHeapSampler _heap_sampler; // Thread-local state used when sampling heap allocations.
328
329 mutable TRACE_DATA _trace_data; // Thread-local data for tracing; mutable so const accessors can expose it
330
331 ThreadExt _ext;
332
333 int _vm_operation_started_count; // VM_Operation support
334 int _vm_operation_completed_count; // VM_Operation support
335
336 ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
337 // is waiting to lock
338 bool _current_pending_monitor_is_from_java; // locking is from Java code
339
340 // ObjectMonitor on which this thread called Object.wait()
341 ObjectMonitor* _current_waiting_monitor;
342
343 // Private thread-local objectmonitor list - a simple cache organized as a SLL.
344 public:
345 ObjectMonitor* omFreeList;
346 int omFreeCount; // length of omFreeList
347 int omFreeProvision; // reload chunk size
489 // Internal handle support
490 HandleArea* handle_area() const { return _handle_area; }
491 void set_handle_area(HandleArea* area) { _handle_area = area; }
492
493 GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; }
494 void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
495
496 // Thread-Local Allocation Buffer (TLAB) support
497 ThreadLocalAllocBuffer& tlab() { return _tlab; }
498 void initialize_tlab() {
499 if (UseTLAB) {
500 tlab().initialize();
501 }
502 }
503
504 jlong allocated_bytes() { return _allocated_bytes; }
505 void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
506 void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
507 inline jlong cooked_allocated_bytes(); // declaration only — presumably defined in thread.inline.hpp; TODO confirm
508
509 ThreadHeapSampler& heap_sampler() { return _heap_sampler; } // accessor for the per-thread heap sampling state
510
511 TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET;
512 TRACE_DATA* trace_data() const { return &_trace_data; } // _trace_data is mutable, so a writable pointer can be returned from a const method
513 bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }
514
515 const ThreadExt& ext() const { return _ext; }
516 ThreadExt& ext() { return _ext; }
517
518 // VM operation support
519 int vm_operation_ticket() { return ++_vm_operation_started_count; } // pre-increment: first ticket issued is 1
520 int vm_operation_completed_count() { return _vm_operation_completed_count; }
521 void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }
522
523 // For tracking the heavyweight monitor the thread is pending on.
524 ObjectMonitor* current_pending_monitor() {
525 return _current_pending_monitor;
526 }
527 void set_current_pending_monitor(ObjectMonitor* monitor) {
528 _current_pending_monitor = monitor;
529 }
530 void set_current_pending_monitor_is_from_java(bool from_java) {
665
666 public:
667 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; } // iteration count nests; see is_inside_jvmti_env_iteration
668 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
669 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
670
671 // Code generation support: static field offsets for use by generated code.
672 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
673 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
674 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
675
676 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
677 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
678
679 static ByteSize polling_page_offset() { return byte_offset_of(Thread, _polling_page); }
680
681 #define TLAB_FIELD_OFFSET(name) \
682 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
683
684 TLAB_FIELD_OFFSET(start)
685 TLAB_FIELD_OFFSET(current_end)
686 TLAB_FIELD_OFFSET(top)
687 TLAB_FIELD_OFFSET(pf_top)
688 TLAB_FIELD_OFFSET(size) // desired_size
689 TLAB_FIELD_OFFSET(refill_waste_limit)
690 TLAB_FIELD_OFFSET(number_of_refills)
691 TLAB_FIELD_OFFSET(fast_refill_waste)
692 TLAB_FIELD_OFFSET(slow_allocations)
693
694 #undef TLAB_FIELD_OFFSET
695
696 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
697
698 public:
699 volatile intptr_t _Stalled; // NOTE(review): purpose not evident in this chunk — confirm against monitor/mux code
700 volatile int _TypeTag; // NOTE(review): purpose not evident in this chunk — confirm before relying on it
701 ParkEvent * _ParkEvent; // for synchronized()
702 ParkEvent * _SleepEvent; // for Thread.sleep
703 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
704 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
705 int NativeSyncRecursion; // diagnostic
|