72 vmx : 1,
73 : 1,
74 est : 1,
75 : 1,
76 ssse3 : 1,
77 cid : 1,
78 : 1,
79 fma : 1,
80 cmpxchg16: 1,
81 : 4,
82 dca : 1,
83 sse4_1 : 1,
84 sse4_2 : 1,
85 : 2,
86 popcnt : 1,
87 : 1,
88 aes : 1,
89 : 1,
90 osxsave : 1,
91 avx : 1,
92 : 3;
93 } bits;
94 };
95
96 union StdCpuid1Edx {
97 uint32_t value;
98 struct {
99 uint32_t : 4,
100 tsc : 1,
101 : 3,
102 cmpxchg8 : 1,
103 : 6,
104 cmov : 1,
105 : 3,
106 clflush : 1,
107 : 3,
108 mmx : 1,
109 fxsr : 1,
110 sse : 1,
111 sse2 : 1,
112 : 1,
331 CPU_AVX512PF = (1 << 28),
332 CPU_AVX512ER = (1 << 29),
333 CPU_AVX512CD = (1 << 30)
334 // Keeping sign bit 31 unassigned.
335 };
336
// CPU feature bits 32 and up. These cannot be added to the feature enum
// above because enum values are limited to 31 bits (bit 31 is kept
// unassigned as the sign bit), so higher feature bits are expressed as
// 64-bit macro constants instead.
#define CPU_AVX512BW ((uint64_t)UCONST64( 0x100000000)) // enums are limited to 31 bit
#define CPU_AVX512VL ((uint64_t)UCONST64( 0x200000000)) // EVEX instructions with smaller vector length
#define CPU_SHA ((uint64_t)UCONST64( 0x400000000)) // SHA instructions
#define CPU_FMA ((uint64_t)UCONST64( 0x800000000)) // FMA instructions
#define CPU_VZEROUPPER ((uint64_t)UCONST64( 0x1000000000)) // Vzeroupper instruction
#define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64( 0x2000000000)) // Vector popcount
#define CPU_AVX512_VPCLMULQDQ ((uint64_t)UCONST64( 0x4000000000)) // Vector carryless multiplication
#define CPU_AVX512_VAES ((uint64_t)UCONST64( 0x8000000000)) // Vector AES instructions
#define CPU_AVX512_VNNI ((uint64_t)UCONST64( 0x10000000000)) // Vector Neural Network Instructions
#define CPU_FLUSH ((uint64_t)UCONST64( 0x20000000000)) // flush instruction
#define CPU_FLUSHOPT ((uint64_t)UCONST64( 0x40000000000)) // flushopt instruction
#define CPU_CLWB ((uint64_t)UCONST64( 0x80000000000)) // clwb instruction
#define CPU_AVX512_VBMI2 ((uint64_t)UCONST64(0x100000000000)) // VBMI2 shift left double instructions
#define CPU_AVX512_VBMI ((uint64_t)UCONST64(0x200000000000)) // Vector BMI instructions
351
352 // NB! When adding new CPU feature detection consider updating vmStructs_x86.hpp, vmStructs_jvmci.hpp, and VM_Version::get_processor_features().
353
354 enum Extended_Family {
355 // AMD
356 CPU_FAMILY_AMD_11H = 0x11,
357 // ZX
358 CPU_FAMILY_ZX_CORE_F6 = 6,
359 CPU_FAMILY_ZX_CORE_F7 = 7,
360 // Intel
361 CPU_FAMILY_INTEL_CORE = 6,
362 CPU_MODEL_NEHALEM = 0x1e,
363 CPU_MODEL_NEHALEM_EP = 0x1a,
364 CPU_MODEL_NEHALEM_EX = 0x2e,
365 CPU_MODEL_WESTMERE = 0x25,
366 CPU_MODEL_WESTMERE_EP = 0x2c,
367 CPU_MODEL_WESTMERE_EX = 0x2f,
368 CPU_MODEL_SANDYBRIDGE = 0x2a,
369 CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
370 CPU_MODEL_IVYBRIDGE_EP = 0x3a,
563 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0)
564 result |= CPU_AVX512ER;
565 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0)
566 result |= CPU_AVX512BW;
567 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
568 result |= CPU_AVX512VL;
569 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
570 result |= CPU_AVX512_VPOPCNTDQ;
571 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
572 result |= CPU_AVX512_VPCLMULQDQ;
573 if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
574 result |= CPU_AVX512_VAES;
575 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
576 result |= CPU_AVX512_VNNI;
577 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi != 0)
578 result |= CPU_AVX512_VBMI;
579 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
580 result |= CPU_AVX512_VBMI2;
581 }
582 }
583 if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
584 result |= CPU_BMI1;
585 if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
586 result |= CPU_TSC;
587 if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
588 result |= CPU_TSCINV;
589 if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
590 result |= CPU_AES;
591 if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
592 result |= CPU_ERMS;
593 if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
594 result |= CPU_CLMUL;
595 if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
596 result |= CPU_RTM;
597 if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
598 result |= CPU_ADX;
599 if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
600 result |= CPU_BMI2;
601 if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
602 result |= CPU_SHA;
854 static bool supports_avx512cd() { return (_features & CPU_AVX512CD) != 0; }
855 static bool supports_avx512bw() { return (_features & CPU_AVX512BW) != 0; }
856 static bool supports_avx512vl() { return (_features & CPU_AVX512VL) != 0; }
857 static bool supports_avx512vlbw() { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
858 static bool supports_avx512vldq() { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
859 static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
860 supports_avx512bw() && supports_avx512dq()); }
861 static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
862 static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
863 static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
864 static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
865 static bool supports_sha() { return (_features & CPU_SHA) != 0; }
866 static bool supports_fma() { return (_features & CPU_FMA) != 0 && supports_avx(); }
867 static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
868 static bool supports_avx512_vpopcntdq() { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
869 static bool supports_avx512_vpclmulqdq() { return (_features & CPU_AVX512_VPCLMULQDQ) != 0; }
870 static bool supports_avx512_vaes() { return (_features & CPU_AVX512_VAES) != 0; }
871 static bool supports_avx512_vnni() { return (_features & CPU_AVX512_VNNI) != 0; }
872 static bool supports_avx512_vbmi() { return (_features & CPU_AVX512_VBMI) != 0; }
873 static bool supports_avx512_vbmi2() { return (_features & CPU_AVX512_VBMI2) != 0; }
874
875 // Intel features
876 static bool is_intel_family_core() { return is_intel() &&
877 extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
878
879 static bool is_intel_skylake() { return is_intel_family_core() &&
880 extended_cpu_model() == CPU_MODEL_SKYLAKE; }
881
882 static bool is_intel_tsc_synched_at_init() {
883 if (is_intel_family_core()) {
884 uint32_t ext_model = extended_cpu_model();
885 if (ext_model == CPU_MODEL_NEHALEM_EP ||
886 ext_model == CPU_MODEL_WESTMERE_EP ||
887 ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
888 ext_model == CPU_MODEL_IVYBRIDGE_EP) {
889 // <= 2-socket invariant tsc support. EX versions are usually used
890 // in > 2-socket systems and likely don't synchronize tscs at
891 // initialization.
892 // Code that uses tsc values must be prepared for them to arbitrarily
893 // jump forward or backward.
1006 // to flush the code cache.
1007 // Unfortunately, Assembler::clflush is currently called as part
1008 // of generation of the code cache flush routine. This happens
1009 // under Universe::init before the processor features are set
1010 // up. Assembler::flush calls this routine to check that clflush
1011 // is allowed. So, we give the caller a free pass if Universe init
1012 // is still in progress.
1013 assert ((!Universe::is_fully_initialized() || (_features & CPU_FLUSH) != 0), "clflush should be available");
1014 return true;
1015 }
1016 static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
1017 static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
1018 #else
1019 static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); }
1020 static bool supports_clflushopt() { return false; }
1021 static bool supports_clwb() { return false; }
1022 #endif // _LP64
1023
1024 // support functions for virtualization detection
1025 private:
1026 static void check_virt_cpuid(uint32_t idx, uint32_t *regs);
1027 static void check_virtualizations();
1028 };
1029
1030 #endif // CPU_X86_VM_VERSION_X86_HPP
|
72 vmx : 1,
73 : 1,
74 est : 1,
75 : 1,
76 ssse3 : 1,
77 cid : 1,
78 : 1,
79 fma : 1,
80 cmpxchg16: 1,
81 : 4,
82 dca : 1,
83 sse4_1 : 1,
84 sse4_2 : 1,
85 : 2,
86 popcnt : 1,
87 : 1,
88 aes : 1,
89 : 1,
90 osxsave : 1,
91 avx : 1,
92 : 2,
93 hv : 1;
94 } bits;
95 };
96
97 union StdCpuid1Edx {
98 uint32_t value;
99 struct {
100 uint32_t : 4,
101 tsc : 1,
102 : 3,
103 cmpxchg8 : 1,
104 : 6,
105 cmov : 1,
106 : 3,
107 clflush : 1,
108 : 3,
109 mmx : 1,
110 fxsr : 1,
111 sse : 1,
112 sse2 : 1,
113 : 1,
332 CPU_AVX512PF = (1 << 28),
333 CPU_AVX512ER = (1 << 29),
334 CPU_AVX512CD = (1 << 30)
335 // Keeping sign bit 31 unassigned.
336 };
337
// CPU feature bits 32 and up. These cannot be added to the feature enum
// above because enum values are limited to 31 bits (bit 31 is kept
// unassigned as the sign bit), so higher feature bits are expressed as
// 64-bit macro constants instead.
#define CPU_AVX512BW ((uint64_t)UCONST64( 0x100000000)) // enums are limited to 31 bit
#define CPU_AVX512VL ((uint64_t)UCONST64( 0x200000000)) // EVEX instructions with smaller vector length
#define CPU_SHA ((uint64_t)UCONST64( 0x400000000)) // SHA instructions
#define CPU_FMA ((uint64_t)UCONST64( 0x800000000)) // FMA instructions
#define CPU_VZEROUPPER ((uint64_t)UCONST64( 0x1000000000)) // Vzeroupper instruction
#define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64( 0x2000000000)) // Vector popcount
#define CPU_AVX512_VPCLMULQDQ ((uint64_t)UCONST64( 0x4000000000)) // Vector carryless multiplication
#define CPU_AVX512_VAES ((uint64_t)UCONST64( 0x8000000000)) // Vector AES instructions
#define CPU_AVX512_VNNI ((uint64_t)UCONST64( 0x10000000000)) // Vector Neural Network Instructions
#define CPU_FLUSH ((uint64_t)UCONST64( 0x20000000000)) // flush instruction
#define CPU_FLUSHOPT ((uint64_t)UCONST64( 0x40000000000)) // flushopt instruction
#define CPU_CLWB ((uint64_t)UCONST64( 0x80000000000)) // clwb instruction
#define CPU_AVX512_VBMI2 ((uint64_t)UCONST64(0x100000000000)) // VBMI2 shift left double instructions
#define CPU_AVX512_VBMI ((uint64_t)UCONST64(0x200000000000)) // Vector BMI instructions
#define CPU_HV_PRESENT ((uint64_t)UCONST64(0x400000000000)) // for hypervisor detection
353
354 // NB! When adding new CPU feature detection consider updating vmStructs_x86.hpp, vmStructs_jvmci.hpp, and VM_Version::get_processor_features().
355
356 enum Extended_Family {
357 // AMD
358 CPU_FAMILY_AMD_11H = 0x11,
359 // ZX
360 CPU_FAMILY_ZX_CORE_F6 = 6,
361 CPU_FAMILY_ZX_CORE_F7 = 7,
362 // Intel
363 CPU_FAMILY_INTEL_CORE = 6,
364 CPU_MODEL_NEHALEM = 0x1e,
365 CPU_MODEL_NEHALEM_EP = 0x1a,
366 CPU_MODEL_NEHALEM_EX = 0x2e,
367 CPU_MODEL_WESTMERE = 0x25,
368 CPU_MODEL_WESTMERE_EP = 0x2c,
369 CPU_MODEL_WESTMERE_EX = 0x2f,
370 CPU_MODEL_SANDYBRIDGE = 0x2a,
371 CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
372 CPU_MODEL_IVYBRIDGE_EP = 0x3a,
565 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0)
566 result |= CPU_AVX512ER;
567 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0)
568 result |= CPU_AVX512BW;
569 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
570 result |= CPU_AVX512VL;
571 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
572 result |= CPU_AVX512_VPOPCNTDQ;
573 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
574 result |= CPU_AVX512_VPCLMULQDQ;
575 if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
576 result |= CPU_AVX512_VAES;
577 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
578 result |= CPU_AVX512_VNNI;
579 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi != 0)
580 result |= CPU_AVX512_VBMI;
581 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
582 result |= CPU_AVX512_VBMI2;
583 }
584 }
585 if (_cpuid_info.std_cpuid1_ecx.bits.hv != 0)
586 result |= CPU_HV_PRESENT;
587 if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
588 result |= CPU_BMI1;
589 if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
590 result |= CPU_TSC;
591 if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
592 result |= CPU_TSCINV;
593 if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
594 result |= CPU_AES;
595 if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
596 result |= CPU_ERMS;
597 if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
598 result |= CPU_CLMUL;
599 if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
600 result |= CPU_RTM;
601 if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
602 result |= CPU_ADX;
603 if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
604 result |= CPU_BMI2;
605 if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
606 result |= CPU_SHA;
858 static bool supports_avx512cd() { return (_features & CPU_AVX512CD) != 0; }
859 static bool supports_avx512bw() { return (_features & CPU_AVX512BW) != 0; }
860 static bool supports_avx512vl() { return (_features & CPU_AVX512VL) != 0; }
861 static bool supports_avx512vlbw() { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
862 static bool supports_avx512vldq() { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
863 static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
864 supports_avx512bw() && supports_avx512dq()); }
865 static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
866 static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
867 static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
868 static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
869 static bool supports_sha() { return (_features & CPU_SHA) != 0; }
870 static bool supports_fma() { return (_features & CPU_FMA) != 0 && supports_avx(); }
871 static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
872 static bool supports_avx512_vpopcntdq() { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
873 static bool supports_avx512_vpclmulqdq() { return (_features & CPU_AVX512_VPCLMULQDQ) != 0; }
874 static bool supports_avx512_vaes() { return (_features & CPU_AVX512_VAES) != 0; }
875 static bool supports_avx512_vnni() { return (_features & CPU_AVX512_VNNI) != 0; }
876 static bool supports_avx512_vbmi() { return (_features & CPU_AVX512_VBMI) != 0; }
877 static bool supports_avx512_vbmi2() { return (_features & CPU_AVX512_VBMI2) != 0; }
878 static bool supports_hv() { return (_features & CPU_HV_PRESENT) != 0; }
879
880 // Intel features
881 static bool is_intel_family_core() { return is_intel() &&
882 extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
883
884 static bool is_intel_skylake() { return is_intel_family_core() &&
885 extended_cpu_model() == CPU_MODEL_SKYLAKE; }
886
887 static bool is_intel_tsc_synched_at_init() {
888 if (is_intel_family_core()) {
889 uint32_t ext_model = extended_cpu_model();
890 if (ext_model == CPU_MODEL_NEHALEM_EP ||
891 ext_model == CPU_MODEL_WESTMERE_EP ||
892 ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
893 ext_model == CPU_MODEL_IVYBRIDGE_EP) {
894 // <= 2-socket invariant tsc support. EX versions are usually used
895 // in > 2-socket systems and likely don't synchronize tscs at
896 // initialization.
897 // Code that uses tsc values must be prepared for them to arbitrarily
898 // jump forward or backward.
1011 // to flush the code cache.
1012 // Unfortunately, Assembler::clflush is currently called as part
1013 // of generation of the code cache flush routine. This happens
1014 // under Universe::init before the processor features are set
1015 // up. Assembler::flush calls this routine to check that clflush
1016 // is allowed. So, we give the caller a free pass if Universe init
1017 // is still in progress.
1018 assert ((!Universe::is_fully_initialized() || (_features & CPU_FLUSH) != 0), "clflush should be available");
1019 return true;
1020 }
1021 static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
1022 static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
1023 #else
1024 static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); }
1025 static bool supports_clflushopt() { return false; }
1026 static bool supports_clwb() { return false; }
1027 #endif // _LP64
1028
1029 // support functions for virtualization detection
1030 private:
1031 static void check_virt_cpuid(uint32_t *regs);
1032 static void check_virtualizations();
1033
1034 #ifdef _WINDOWS
1035 static bool is_in_VM();
1036 #endif
1037
1038 };
1039
1040 #endif // CPU_X86_VM_VERSION_X86_HPP
|