// Nesting depth of JVM TI environment iterations in progress on this thread.
// Declared volatile so each read/write goes to memory; NOTE(review): volatile
// alone is not a synchronization primitive — confirm callers serialize access.
// NOTE(review): the leading "605"-style tokens on each line look like pasted
// line numbers from a numbered listing, not original source — verify/strip.
605 volatile int _jvmti_env_iteration_count;
606
607 public:
// Bracket an iteration over JVM TI environments: callers increment on entry,
// decrement on exit, and the predicate reports whether any iteration (possibly
// nested, hence a counter rather than a flag) is currently active.
608 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
609 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
610 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
611
612 // Code generation
// Static accessors exposing the byte offsets of Thread fields so generated
// code (presumably interpreter/JIT stubs — confirm at call sites) can address
// them relative to a thread pointer. The fields themselves (_exception_file,
// _exception_line, _active_handles, _stack_base, _stack_size) are declared
// elsewhere in this class, outside this excerpt.
613 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
614 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
615 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
616
// Offsets of the thread's stack-extent bookkeeping fields.
617 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
618 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
619
// Generates a static accessor tlab_<name>_offset() whose result is the offset
// of the embedded _tlab member within Thread plus the offset of the named
// field inside ThreadLocalAllocBuffer — i.e. the field's offset relative to
// the Thread object, for use by offset-based (generated) code.
620 #define TLAB_FIELD_OFFSET(name) \
621 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
622
// One accessor per TLAB field exposed to code generation.
623 TLAB_FIELD_OFFSET(start)
624 TLAB_FIELD_OFFSET(end)
625 TLAB_FIELD_OFFSET(top)
626 TLAB_FIELD_OFFSET(pf_top)
627 TLAB_FIELD_OFFSET(size) // desired_size
628 TLAB_FIELD_OFFSET(refill_waste_limit)
629 TLAB_FIELD_OFFSET(number_of_refills)
630 TLAB_FIELD_OFFSET(fast_refill_waste)
631 TLAB_FIELD_OFFSET(slow_allocations)
632
// Macro is local to this declaration block; undefine to avoid leaking it.
633 #undef TLAB_FIELD_OFFSET
634
// Offset of the per-thread _allocated_bytes counter (field declared outside
// this excerpt) for offset-based access from generated code.
635 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
636
637 public:
// Low-level synchronization support. NOTE(review): these are deliberately
// public and raw; per the trailing comments each ParkEvent serves a distinct
// subsystem — confirm ownership/lifetime rules at the allocation sites, which
// are outside this excerpt.
638 volatile intptr_t _Stalled;
639 volatile int _TypeTag;
640 ParkEvent * _ParkEvent; // for synchronized()
641 ParkEvent * _SleepEvent; // for Thread.sleep
642 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
643 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
644 int NativeSyncRecursion; // diagnostic
|
// Nesting depth of JVM TI environment iterations in progress on this thread.
// Declared volatile so each read/write goes to memory; NOTE(review): volatile
// alone is not a synchronization primitive — confirm callers serialize access.
// NOTE(review): the leading "605"-style tokens on each line look like pasted
// line numbers from a numbered listing, not original source — verify/strip.
605 volatile int _jvmti_env_iteration_count;
606
607 public:
// Bracket an iteration over JVM TI environments: callers increment on entry,
// decrement on exit, and the predicate reports whether any iteration (possibly
// nested, hence a counter rather than a flag) is currently active.
608 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
609 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
610 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
611
612 // Code generation
// Static accessors exposing the byte offsets of Thread fields so generated
// code (presumably interpreter/JIT stubs — confirm at call sites) can address
// them relative to a thread pointer. The fields themselves (_exception_file,
// _exception_line, _active_handles, _stack_base, _stack_size) are declared
// elsewhere in this class, outside this excerpt.
613 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
614 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
615 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
616
// Offsets of the thread's stack-extent bookkeeping fields.
617 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
618 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
619
// Generates a static accessor tlab_<name>_offset() whose result is the offset
// of the embedded _tlab member within Thread plus the offset of the named
// field inside ThreadLocalAllocBuffer — i.e. the field's offset relative to
// the Thread object, for use by offset-based (generated) code.
620 #define TLAB_FIELD_OFFSET(name) \
621 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
622
// One accessor per TLAB field exposed to code generation. This list includes
// actual_end, so it requires ThreadLocalAllocBuffer to provide
// actual_end_offset() — confirm the TLAB class declares that accessor.
623 TLAB_FIELD_OFFSET(start)
624 TLAB_FIELD_OFFSET(end)
625 TLAB_FIELD_OFFSET(actual_end)
626 TLAB_FIELD_OFFSET(top)
627 TLAB_FIELD_OFFSET(pf_top)
628 TLAB_FIELD_OFFSET(size) // desired_size
629 TLAB_FIELD_OFFSET(refill_waste_limit)
630 TLAB_FIELD_OFFSET(number_of_refills)
631 TLAB_FIELD_OFFSET(fast_refill_waste)
632 TLAB_FIELD_OFFSET(slow_allocations)
633
// Macro is local to this declaration block; undefine to avoid leaking it.
634 #undef TLAB_FIELD_OFFSET
635
// Offset of the per-thread _allocated_bytes counter (field declared outside
// this excerpt) for offset-based access from generated code.
636 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
637
638 public:
// Low-level synchronization support. NOTE(review): these are deliberately
// public and raw; per the trailing comments each ParkEvent serves a distinct
// subsystem — confirm ownership/lifetime rules at the allocation sites, which
// are outside this excerpt.
639 volatile intptr_t _Stalled;
640 volatile int _TypeTag;
641 ParkEvent * _ParkEvent; // for synchronized()
642 ParkEvent * _SleepEvent; // for Thread.sleep
643 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
644 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
645 int NativeSyncRecursion; // diagnostic
|