// Nesting depth of JVMTI environment-list iterations active on this thread.
// volatile for cross-thread visibility; the ++/-- below are NOT atomic, so
// presumably only the iterating thread itself updates this counter —
// TODO confirm against the JvmtiEnvIterator call sites.
598 volatile int _jvmti_env_iteration_count;
599
600 public:
// Bracket an iteration over the JVMTI environment list. While the count is
// nonzero it is presumably unsafe to unlink environments — verify in jvmtiEnvBase.
601 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
602 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
// True while at least one env iteration started on this thread is still open.
603 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
604
605 // Code generation
// Byte offsets of Thread fields, for JIT compilers and assembler stubs that
// address these fields directly relative to the thread pointer.
606 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
607 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
608 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
609
// Stack bounds, used by generated code for stack-overflow/limit checks —
// TODO confirm exact consumers outside this view.
610 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
611 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
612
// Emits one static accessor per TLAB field: the offset of the embedded _tlab
// object plus the field's own offset inside ThreadLocalAllocBuffer.
613 #define TLAB_FIELD_OFFSET(name) \
614 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
615
616 TLAB_FIELD_OFFSET(start)
617 TLAB_FIELD_OFFSET(end)
618 TLAB_FIELD_OFFSET(top)
619 TLAB_FIELD_OFFSET(pf_top)
620 TLAB_FIELD_OFFSET(size) // desired_size
621 TLAB_FIELD_OFFSET(refill_waste_limit)
622 TLAB_FIELD_OFFSET(number_of_refills)
623 TLAB_FIELD_OFFSET(fast_refill_waste)
624 TLAB_FIELD_OFFSET(slow_allocations)
625
// NOTE(review): a second copy of this region later in this file additionally
// emits TLAB_FIELD_OFFSET(actual_end) — confirm whether this shorter list is
// intentionally the older revision of the pair.
626 #undef TLAB_FIELD_OFFSET
627
628 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
629
630
631 public:
// Low-level synchronization support. Public and hand-capitalized per the
// surrounding HotSpot sync-subsystem convention; ParkEvents are per-thread
// wait/wakeup primitives, each reserved for one distinct subsystem so their
// uses cannot interfere — TODO confirm against park.hpp.
632 volatile intptr_t _Stalled;
633 volatile int _TypeTag;
634 ParkEvent * _ParkEvent; // for synchronized()
635 ParkEvent * _SleepEvent; // for Thread.sleep
636 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
637 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
638 int NativeSyncRecursion; // diagnostic
|
// Nesting depth of JVMTI environment-list iterations active on this thread.
// volatile for cross-thread visibility; the ++/-- below are NOT atomic, so
// presumably only the iterating thread itself updates this counter —
// TODO confirm against the JvmtiEnvIterator call sites.
598 volatile int _jvmti_env_iteration_count;
599
600 public:
// Bracket an iteration over the JVMTI environment list. While the count is
// nonzero it is presumably unsafe to unlink environments — verify in jvmtiEnvBase.
601 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
602 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
// True while at least one env iteration started on this thread is still open.
603 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
604
605 // Code generation
// Byte offsets of Thread fields, for JIT compilers and assembler stubs that
// address these fields directly relative to the thread pointer.
606 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
607 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
608 static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
609
// Stack bounds, used by generated code for stack-overflow/limit checks —
// TODO confirm exact consumers outside this view.
610 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
611 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
612
// Emits one static accessor per TLAB field: the offset of the embedded _tlab
// object plus the field's own offset inside ThreadLocalAllocBuffer.
613 #define TLAB_FIELD_OFFSET(name) \
614 static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
615
616 TLAB_FIELD_OFFSET(start)
617 TLAB_FIELD_OFFSET(end)
// NOTE(review): actual_end is present in this copy of the region but absent
// from the earlier copy in this file — this appears to be the newer revision.
618 TLAB_FIELD_OFFSET(actual_end)
619 TLAB_FIELD_OFFSET(top)
620 TLAB_FIELD_OFFSET(pf_top)
621 TLAB_FIELD_OFFSET(size) // desired_size
622 TLAB_FIELD_OFFSET(refill_waste_limit)
623 TLAB_FIELD_OFFSET(number_of_refills)
624 TLAB_FIELD_OFFSET(fast_refill_waste)
625 TLAB_FIELD_OFFSET(slow_allocations)
626
627 #undef TLAB_FIELD_OFFSET
628
629 static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
630
631 public:
// Low-level synchronization support. Public and hand-capitalized per the
// surrounding HotSpot sync-subsystem convention; ParkEvents are per-thread
// wait/wakeup primitives, each reserved for one distinct subsystem so their
// uses cannot interfere — TODO confirm against park.hpp.
632 volatile intptr_t _Stalled;
633 volatile int _TypeTag;
634 ParkEvent * _ParkEvent; // for synchronized()
635 ParkEvent * _SleepEvent; // for Thread.sleep
636 ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
637 ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
638 int NativeSyncRecursion; // diagnostic
|