655
656 public:
// JVMTI environment iteration nesting: the counter is incremented on entry
// and decremented on exit, so is_inside_jvmti_env_iteration() reports true
// while at least one iteration is in progress (counter > 0). The counter
// form (rather than a bool) supports nested enter/leave pairs.
657 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
658 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
659 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
660
661 // Code generation
// Static byte offsets of Thread fields relative to the Thread object base.
// Per the "Code generation" heading, these are presumably consumed by
// generated (interpreter/JIT) code that addresses fields off the thread
// pointer — NOTE(review): the consumers are outside this chunk; confirm.
662 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
663 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
664 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
665
// Offsets of the thread stack bounds fields.
666 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
667 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
668
// Offset of the per-thread safepoint polling page pointer.
669 static ByteSize polling_page_offset() { return byte_offset_of(Thread, _polling_page); }
670
// Generates tlab_<name>_offset() accessors. Each returns the offset of the
// Thread's _tlab member plus the offset of the named field inside
// ThreadLocalAllocBuffer, i.e. the TLAB field's byte offset from the Thread
// base — suitable for direct addressing from generated code.
671 #define TLAB_FIELD_OFFSET(name) \
672 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
673
// NOTE(review): the second listing of this chunk uses current_end here in
// place of end — consistent with a rename of the TLAB end field; verify
// which name ThreadLocalAllocBuffer actually declares in this revision.
674 TLAB_FIELD_OFFSET(start)
675 TLAB_FIELD_OFFSET(end)
676 TLAB_FIELD_OFFSET(top)
677 TLAB_FIELD_OFFSET(pf_top)
678 TLAB_FIELD_OFFSET(size) // desired_size
679 TLAB_FIELD_OFFSET(refill_waste_limit)
680 TLAB_FIELD_OFFSET(number_of_refills)
681 TLAB_FIELD_OFFSET(fast_refill_waste)
682 TLAB_FIELD_OFFSET(slow_allocations)
683
// Scope the helper macro to this offset list only.
684 #undef TLAB_FIELD_OFFSET
685
// Byte offset of the per-thread _allocated_bytes counter; part of the same
// code-generation offset family as the accessors above.
686 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
687
688 public:
// Low-level synchronizer support. The ParkEvent pointers back distinct
// park/unpark-style waiting primitives; each trailing comment names the
// subsystem that uses the event. Keeping them separate avoids one subsystem
// consuming a wakeup intended for another — NOTE(review): inferred from the
// per-subsystem split; confirm against the ParkEvent users.
// NOTE(review): the semantics of _Stalled and _TypeTag are not visible in
// this chunk — confirm before documenting further.
689 volatile intptr_t _Stalled;
690 volatile int _TypeTag;
691 ParkEvent * _ParkEvent; // for synchronized()
692 ParkEvent * _SleepEvent; // for Thread.sleep
693 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
694 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
695 int NativeSyncRecursion; // diagnostic
|
655
656 public:
// JVMTI environment iteration nesting: the counter is incremented on entry
// and decremented on exit, so is_inside_jvmti_env_iteration() reports true
// while at least one iteration is in progress (counter > 0). The counter
// form (rather than a bool) supports nested enter/leave pairs.
657 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
658 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
659 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
660
661 // Code generation
// Static byte offsets of Thread fields relative to the Thread object base.
// Per the "Code generation" heading, these are presumably consumed by
// generated (interpreter/JIT) code that addresses fields off the thread
// pointer — NOTE(review): the consumers are outside this chunk; confirm.
662 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
663 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
664 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
665
// Offsets of the thread stack bounds fields.
666 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
667 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
668
// Offset of the per-thread safepoint polling page pointer.
669 static ByteSize polling_page_offset() { return byte_offset_of(Thread, _polling_page); }
670
// Generates tlab_<name>_offset() accessors. Each returns the offset of the
// Thread's _tlab member plus the offset of the named field inside
// ThreadLocalAllocBuffer, i.e. the TLAB field's byte offset from the Thread
// base — suitable for direct addressing from generated code.
671 #define TLAB_FIELD_OFFSET(name) \
672 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
673
// NOTE(review): this listing uses current_end where the first listing of
// this chunk used end — consistent with a rename of the TLAB end field
// (ThreadLocalAllocBuffer::current_end_offset()); verify which name the
// TLAB class declares in this revision.
674 TLAB_FIELD_OFFSET(start)
675 TLAB_FIELD_OFFSET(current_end)
676 TLAB_FIELD_OFFSET(top)
677 TLAB_FIELD_OFFSET(pf_top)
678 TLAB_FIELD_OFFSET(size) // desired_size
679 TLAB_FIELD_OFFSET(refill_waste_limit)
680 TLAB_FIELD_OFFSET(number_of_refills)
681 TLAB_FIELD_OFFSET(fast_refill_waste)
682 TLAB_FIELD_OFFSET(slow_allocations)
683
// Scope the helper macro to this offset list only.
684 #undef TLAB_FIELD_OFFSET
685
// Byte offset of the per-thread _allocated_bytes counter; part of the same
// code-generation offset family as the accessors above.
686 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
687
688 public:
// Low-level synchronizer support. The ParkEvent pointers back distinct
// park/unpark-style waiting primitives; each trailing comment names the
// subsystem that uses the event. Keeping them separate avoids one subsystem
// consuming a wakeup intended for another — NOTE(review): inferred from the
// per-subsystem split; confirm against the ParkEvent users.
// NOTE(review): the semantics of _Stalled and _TypeTag are not visible in
// this chunk — confirm before documenting further.
689 volatile intptr_t _Stalled;
690 volatile int _TypeTag;
691 ParkEvent * _ParkEvent; // for synchronized()
692 ParkEvent * _SleepEvent; // for Thread.sleep
693 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
694 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
695 int NativeSyncRecursion; // diagnostic
|