1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_VM_VERSION_X86_HPP 26 #define CPU_X86_VM_VERSION_X86_HPP 27 28 #include "memory/universe.hpp" 29 #include "runtime/globals_extension.hpp" 30 #include "runtime/vm_version.hpp" 31 32 class VM_Version : public Abstract_VM_Version { 33 friend class VMStructs; 34 friend class JVMCIVMStructs; 35 36 public: 37 // cpuid result register layouts. These are all unions of a uint32_t 38 // (in case anyone wants access to the register as a whole) and a bitfield. 
  // ---- cpuid result register layouts -------------------------------------
  // Each union pairs the raw 32-bit register value with a bitfield view.
  // Bit positions and widths mirror the hardware CPUID layouts exactly
  // (Intel SDM Vol. 2 / AMD APM Vol. 3) -- do not reorder or resize fields.

  // cpuid leaf 1, EAX: processor signature (stepping/model/family).
  union StdCpuid1Eax {
    uint32_t value;
    struct {
      uint32_t stepping   : 4,
               model      : 4,
               family     : 4,
               proc_type  : 2,
                          : 2,
               ext_model  : 4,
               ext_family : 8,
                          : 4;
    } bits;
  };

  union StdCpuid1Ebx { // example, unused
    uint32_t value;
    struct {
      uint32_t brand_id        : 8,
               clflush_size    : 8,
               threads_per_cpu : 8,
               apic_id         : 8;
    } bits;
  };

  // cpuid leaf 1, ECX: SSE3/SSE4/AVX/AES and related feature bits.
  union StdCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t sse3     : 1,
               clmul    : 1,
                        : 1,
               monitor  : 1,
                        : 1,
               vmx      : 1,
                        : 1,
               est      : 1,
                        : 1,
               ssse3    : 1,
               cid      : 1,
                        : 1,
               fma      : 1,
               cmpxchg16: 1,
                        : 4,
               dca      : 1,
               sse4_1   : 1,
               sse4_2   : 1,
                        : 2,
               popcnt   : 1,
                        : 1,
               aes      : 1,
                        : 1,
               osxsave  : 1,
               avx      : 1,
                        : 3;
    } bits;
  };

  // cpuid leaf 1, EDX: legacy feature bits (TSC, CMOV, MMX, SSE/SSE2, HT).
  union StdCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t          : 4,
               tsc      : 1,
                        : 3,
               cmpxchg8 : 1,
                        : 6,
               cmov     : 1,
                        : 3,
               clflush  : 1,
                        : 3,
               mmx      : 1,
               fxsr     : 1,
               sse      : 1,
               sse2     : 1,
                        : 1,
               ht       : 1,
                        : 3;
    } bits;
  };

  // cpuid leaf 4, EAX: deterministic cache parameters (Intel).
  union DcpCpuid4Eax {
    uint32_t value;
    struct {
      uint32_t cache_type    : 5,
                             : 21,
               cores_per_cpu : 6;
    } bits;
  };

  // cpuid leaf 4, EBX: L1 cache geometry (Intel).
  union DcpCpuid4Ebx {
    uint32_t value;
    struct {
      uint32_t L1_line_size  : 12,
               partitions    : 10,
               associativity : 10;
    } bits;
  };

  // cpuid leaf 0xB, EBX: logical processors at the queried topology level.
  union TplCpuidBEbx {
    uint32_t value;
    struct {
      uint32_t logical_cpus : 16,
                            : 16;
    } bits;
  };

  // cpuid leaf 0x80000001, ECX (note: on Intel/ZX, bit 8 here -- named
  // misalignsse for AMD -- indicates PREFETCHW support instead).
  union ExtCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t LahfSahf     : 1,
               CmpLegacy    : 1,
                            : 3,
               lzcnt_intel  : 1,
               lzcnt        : 1,
               sse4a        : 1,
               misalignsse  : 1,
               prefetchw    : 1,
                            : 22;
    } bits;
  };

  // cpuid leaf 0x80000001, EDX: AMD extended features (3DNow!, long mode).
  union ExtCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t           : 22,
               mmx_amd   : 1,
               mmx       : 1,
               fxsr      : 1,
                         : 4,
               long_mode : 1,
               tdnow2    : 1,
               tdnow     : 1;
    } bits;
  };

  // cpuid leaf 0x80000005, ECX/EDX: AMD L1 cache descriptors.
  union ExtCpuid5Ex {
    uint32_t value;
    struct {
      uint32_t L1_line_size : 8,
               L1_tag_lines : 8,
               L1_assoc     : 8,
               L1_size      : 8;
    } bits;
  };

  // cpuid leaf 0x80000007, EDX: advanced power management (invariant TSC).
  union ExtCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t               : 8,
               tsc_invariance : 1,
                             : 23;
    } bits;
  };

  // cpuid leaf 0x80000008, ECX: AMD core count.
  union ExtCpuid8Ecx {
    uint32_t value;
    struct {
      uint32_t cores_per_cpu : 8,
                             : 24;
    } bits;
  };

  union SefCpuid7Eax {
    uint32_t value;
  };

  // cpuid leaf 7 (structured extended features), EBX: BMI/AVX2/AVX-512/etc.
  union SefCpuid7Ebx {
    uint32_t value;
    struct {
      uint32_t fsgsbase : 1,
                        : 2,
               bmi1     : 1,
                        : 1,
               avx2     : 1,
                        : 2,
               bmi2     : 1,
               erms     : 1,
                        : 1,
               rtm      : 1,
                        : 4,
               avx512f  : 1,
               avx512dq : 1,
                        : 1,
               adx      : 1,
                        : 3,
               clflushopt : 1,
               clwb     : 1,
                        : 1,
               avx512pf : 1,
               avx512er : 1,
               avx512cd : 1,
               sha      : 1,
               avx512bw : 1,
               avx512vl : 1;
    } bits;
  };

  // cpuid leaf 7, ECX: further AVX-512 extensions, VAES, VPCLMULQDQ.
  union SefCpuid7Ecx {
    uint32_t value;
    struct {
      uint32_t prefetchwt1 : 1,
               avx512_vbmi : 1,
               umip        : 1,
               pku         : 1,
               ospke       : 1,
                           : 1,
               avx512_vbmi2 : 1,
                           : 1,
               gfni        : 1,
               vaes        : 1,
               vpclmulqdq  : 1,
               avx512_vnni : 1,
               avx512_bitalg : 1,
                           : 1,
               avx512_vpopcntdq : 1,
                           : 17;
    } bits;
  };

  // cpuid leaf 7, EDX: Knights Mill AVX-512 extensions.
  union SefCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t             : 2,
               avx512_4vnniw : 1,
               avx512_4fmaps : 1,
                           : 28;
    } bits;
  };

  // cpuid leaf 0x8000001E, EBX: AMD family 17h SMT topology.
  union ExtCpuid1EEbx {
    uint32_t value;
    struct {
      uint32_t                  : 8,
               threads_per_core : 8,
                                : 16;
    } bits;
  };

  // XCR0 (XFEATURE_ENABLED_MASK), EAX: which register states the OS saves.
  union XemXcr0Eax {
    uint32_t value;
    struct {
      uint32_t x87     : 1,
               sse     : 1,
               ymm     : 1,
               bndregs : 1,
               bndcsr  : 1,
               opmask  : 1,
               zmm512  : 1,
               zmm32   : 1,
                       : 24;
    } bits;
  };

protected:
  // Cached processor signature fields, filled in by get_processor_features().
  static int _cpu;
  static int _model;
  static int _stepping;

  static address _cpuinfo_segv_addr; // address of instruction which causes SEGV
  static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV

  // Feature bits stored in Abstract_VM_Version::_features (a uint64_t).
  // Bits 0..30 are defined here as enum values; bits >= 32 cannot be enum
  // constants and are defined as CPU_* macros below.
  enum Feature_Flag {
    CPU_CX8            = (1 << 0), // next bits are from cpuid 1 (EDX)
    CPU_CMOV           = (1 << 1),
    CPU_FXSR           = (1 << 2),
    CPU_HT             = (1 << 3),
    CPU_MMX            = (1 << 4),
    CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions
                                   // may not necessarily support other 3dnow instructions
    CPU_SSE            = (1 << 6),
    CPU_SSE2           = (1 << 7),
    CPU_SSE3           = (1 << 8), // SSE3 comes from cpuid 1 (ECX)
    CPU_SSSE3          = (1 << 9),
    CPU_SSE4A          = (1 << 10),
    CPU_SSE4_1         = (1 << 11),
    CPU_SSE4_2         = (1 << 12),
    CPU_POPCNT         = (1 << 13),
    CPU_LZCNT          = (1 << 14),
    CPU_TSC            = (1 << 15),
    CPU_TSCINV         = (1 << 16),
    CPU_AVX            = (1 << 17),
    CPU_AVX2           = (1 << 18),
    CPU_AES            = (1 << 19),
    CPU_ERMS           = (1 << 20), // enhanced 'rep movsb/stosb' instructions
    CPU_CLMUL          = (1 << 21), // carryless multiply for CRC
    CPU_BMI1           = (1 << 22),
    CPU_BMI2           = (1 << 23),
    CPU_RTM            = (1 << 24), // Restricted Transactional Memory instructions
    CPU_ADX            = (1 << 25),
    CPU_AVX512F        = (1 << 26), // AVX 512bit foundation instructions
    CPU_AVX512DQ       = (1 << 27),
    CPU_AVX512PF       = (1 << 28),
    CPU_AVX512ER       = (1 << 29),
    CPU_AVX512CD       = (1 << 30)
    // Keeping sign bit 31 unassigned.
  };

#define CPU_AVX512BW ((uint64_t)UCONST64(0x100000000)) // enums are limited to 31 bit
#define CPU_AVX512VL ((uint64_t)UCONST64(0x200000000)) // EVEX instructions with smaller vector length
#define CPU_SHA ((uint64_t)UCONST64(0x400000000))      // SHA instructions
#define CPU_FMA ((uint64_t)UCONST64(0x800000000))      // FMA instructions
#define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000)) // Vzeroupper instruction
#define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
#define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) // Vector carryless multiplication
#define CPU_VAES ((uint64_t)UCONST64(0x8000000000))    // Vector AES instructions
#define CPU_VNNI ((uint64_t)UCONST64(0x10000000000))   // Vector Neural Network Instructions

#define CPU_FLUSH ((uint64_t)UCONST64(0x20000000000))  // flush instruction
#define CPU_FLUSHOPT ((uint64_t)UCONST64(0x40000000000)) // flushopt instruction
#define CPU_CLWB ((uint64_t)UCONST64(0x80000000000))   // clwb instruction

  // Family/model constants used to special-case specific processor lines.
  enum Extended_Family {
    // AMD
    CPU_FAMILY_AMD_11H       = 0x11,
    // ZX
    CPU_FAMILY_ZX_CORE_F6    = 6,
    CPU_FAMILY_ZX_CORE_F7    = 7,
    // Intel
    CPU_FAMILY_INTEL_CORE    = 6,
    CPU_MODEL_NEHALEM        = 0x1e,
    CPU_MODEL_NEHALEM_EP     = 0x1a,
    CPU_MODEL_NEHALEM_EX     = 0x2e,
    CPU_MODEL_WESTMERE       = 0x25,
    CPU_MODEL_WESTMERE_EP    = 0x2c,
    CPU_MODEL_WESTMERE_EX    = 0x2f,
    CPU_MODEL_SANDYBRIDGE    = 0x2a,
    CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
    CPU_MODEL_IVYBRIDGE_EP   = 0x3a,
    CPU_MODEL_HASWELL_E3     = 0x3c,
    CPU_MODEL_HASWELL_E7     = 0x3f,
    CPU_MODEL_BROADWELL      = 0x3d,
    CPU_MODEL_SKYLAKE        = 0x55
  };

  // cpuid information block.  All info derived from executing cpuid with
  // various function numbers is stored here. Intel and AMD info is
  // merged in this block: accessor methods disentangle it.
375 // 376 // The info block is laid out in subblocks of 4 dwords corresponding to 377 // eax, ebx, ecx and edx, whether or not they contain anything useful. 378 struct CpuidInfo { 379 // cpuid function 0 380 uint32_t std_max_function; 381 uint32_t std_vendor_name_0; 382 uint32_t std_vendor_name_1; 383 uint32_t std_vendor_name_2; 384 385 // cpuid function 1 386 StdCpuid1Eax std_cpuid1_eax; 387 StdCpuid1Ebx std_cpuid1_ebx; 388 StdCpuid1Ecx std_cpuid1_ecx; 389 StdCpuid1Edx std_cpuid1_edx; 390 391 // cpuid function 4 (deterministic cache parameters) 392 DcpCpuid4Eax dcp_cpuid4_eax; 393 DcpCpuid4Ebx dcp_cpuid4_ebx; 394 uint32_t dcp_cpuid4_ecx; // unused currently 395 uint32_t dcp_cpuid4_edx; // unused currently 396 397 // cpuid function 7 (structured extended features) 398 SefCpuid7Eax sef_cpuid7_eax; 399 SefCpuid7Ebx sef_cpuid7_ebx; 400 SefCpuid7Ecx sef_cpuid7_ecx; 401 SefCpuid7Edx sef_cpuid7_edx; 402 403 // cpuid function 0xB (processor topology) 404 // ecx = 0 405 uint32_t tpl_cpuidB0_eax; 406 TplCpuidBEbx tpl_cpuidB0_ebx; 407 uint32_t tpl_cpuidB0_ecx; // unused currently 408 uint32_t tpl_cpuidB0_edx; // unused currently 409 410 // ecx = 1 411 uint32_t tpl_cpuidB1_eax; 412 TplCpuidBEbx tpl_cpuidB1_ebx; 413 uint32_t tpl_cpuidB1_ecx; // unused currently 414 uint32_t tpl_cpuidB1_edx; // unused currently 415 416 // ecx = 2 417 uint32_t tpl_cpuidB2_eax; 418 TplCpuidBEbx tpl_cpuidB2_ebx; 419 uint32_t tpl_cpuidB2_ecx; // unused currently 420 uint32_t tpl_cpuidB2_edx; // unused currently 421 422 // cpuid function 0x80000000 // example, unused 423 uint32_t ext_max_function; 424 uint32_t ext_vendor_name_0; 425 uint32_t ext_vendor_name_1; 426 uint32_t ext_vendor_name_2; 427 428 // cpuid function 0x80000001 429 uint32_t ext_cpuid1_eax; // reserved 430 uint32_t ext_cpuid1_ebx; // reserved 431 ExtCpuid1Ecx ext_cpuid1_ecx; 432 ExtCpuid1Edx ext_cpuid1_edx; 433 434 // cpuid functions 0x80000002 thru 0x80000004: example, unused 435 uint32_t proc_name_0, proc_name_1, proc_name_2, 
proc_name_3; 436 uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7; 437 uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11; 438 439 // cpuid function 0x80000005 // AMD L1, Intel reserved 440 uint32_t ext_cpuid5_eax; // unused currently 441 uint32_t ext_cpuid5_ebx; // reserved 442 ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD) 443 ExtCpuid5Ex ext_cpuid5_edx; // L1 instruction cache info (AMD) 444 445 // cpuid function 0x80000007 446 uint32_t ext_cpuid7_eax; // reserved 447 uint32_t ext_cpuid7_ebx; // reserved 448 uint32_t ext_cpuid7_ecx; // reserved 449 ExtCpuid7Edx ext_cpuid7_edx; // tscinv 450 451 // cpuid function 0x80000008 452 uint32_t ext_cpuid8_eax; // unused currently 453 uint32_t ext_cpuid8_ebx; // reserved 454 ExtCpuid8Ecx ext_cpuid8_ecx; 455 uint32_t ext_cpuid8_edx; // reserved 456 457 // cpuid function 0x8000001E // AMD 17h 458 uint32_t ext_cpuid1E_eax; 459 ExtCpuid1EEbx ext_cpuid1E_ebx; // threads per core (AMD17h) 460 uint32_t ext_cpuid1E_ecx; 461 uint32_t ext_cpuid1E_edx; // unused currently 462 463 // extended control register XCR0 (the XFEATURE_ENABLED_MASK register) 464 XemXcr0Eax xem_xcr0_eax; 465 uint32_t xem_xcr0_edx; // reserved 466 467 // Space to save ymm registers after signal handle 468 int ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15 469 470 // Space to save zmm registers after signal handle 471 int zmm_save[16*4]; // Save zmm0, zmm7, zmm8, zmm31 472 }; 473 474 // The actual cpuid info block 475 static CpuidInfo _cpuid_info; 476 477 // Extractors and predicates 478 static uint32_t extended_cpu_family() { 479 uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family; 480 result += _cpuid_info.std_cpuid1_eax.bits.ext_family; 481 return result; 482 } 483 484 static uint32_t extended_cpu_model() { 485 uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model; 486 result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4; 487 return result; 488 } 489 490 static uint32_t cpu_stepping() { 491 uint32_t result = 
_cpuid_info.std_cpuid1_eax.bits.stepping; 492 return result; 493 } 494 495 static uint logical_processor_count() { 496 uint result = threads_per_core(); 497 return result; 498 } 499 500 static uint64_t feature_flags() { 501 uint64_t result = 0; 502 if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0) 503 result |= CPU_CX8; 504 if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0) 505 result |= CPU_CMOV; 506 if (_cpuid_info.std_cpuid1_edx.bits.clflush != 0) 507 result |= CPU_FLUSH; 508 #ifdef _LP64 509 // clflush should always be available on x86_64 510 // if not we are in real trouble because we rely on it 511 // to flush the code cache. 512 assert ((result & CPU_FLUSH) != 0, "clflush should be available"); 513 #endif 514 if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() && 515 _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0)) 516 result |= CPU_FXSR; 517 // HT flag is set for multi-core processors also. 518 if (threads_per_core() > 1) 519 result |= CPU_HT; 520 if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() && 521 _cpuid_info.ext_cpuid1_edx.bits.mmx != 0)) 522 result |= CPU_MMX; 523 if (_cpuid_info.std_cpuid1_edx.bits.sse != 0) 524 result |= CPU_SSE; 525 if (_cpuid_info.std_cpuid1_edx.bits.sse2 != 0) 526 result |= CPU_SSE2; 527 if (_cpuid_info.std_cpuid1_ecx.bits.sse3 != 0) 528 result |= CPU_SSE3; 529 if (_cpuid_info.std_cpuid1_ecx.bits.ssse3 != 0) 530 result |= CPU_SSSE3; 531 if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0) 532 result |= CPU_SSE4_1; 533 if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0) 534 result |= CPU_SSE4_2; 535 if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0) 536 result |= CPU_POPCNT; 537 if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 && 538 _cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 && 539 _cpuid_info.xem_xcr0_eax.bits.sse != 0 && 540 _cpuid_info.xem_xcr0_eax.bits.ymm != 0) { 541 result |= CPU_AVX; 542 result |= CPU_VZEROUPPER; 543 if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0) 544 result |= CPU_AVX2; 545 if 
(_cpuid_info.sef_cpuid7_ebx.bits.avx512f != 0 && 546 _cpuid_info.xem_xcr0_eax.bits.opmask != 0 && 547 _cpuid_info.xem_xcr0_eax.bits.zmm512 != 0 && 548 _cpuid_info.xem_xcr0_eax.bits.zmm32 != 0) { 549 result |= CPU_AVX512F; 550 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512cd != 0) 551 result |= CPU_AVX512CD; 552 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512dq != 0) 553 result |= CPU_AVX512DQ; 554 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512pf != 0) 555 result |= CPU_AVX512PF; 556 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0) 557 result |= CPU_AVX512ER; 558 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0) 559 result |= CPU_AVX512BW; 560 if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0) 561 result |= CPU_AVX512VL; 562 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0) 563 result |= CPU_AVX512_VPOPCNTDQ; 564 if (_cpuid_info.sef_cpuid7_ecx.bits.vpclmulqdq != 0) 565 result |= CPU_VPCLMULQDQ; 566 if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0) 567 result |= CPU_VAES; 568 if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0) 569 result |= CPU_VNNI; 570 } 571 } 572 if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0) 573 result |= CPU_BMI1; 574 if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0) 575 result |= CPU_TSC; 576 if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0) 577 result |= CPU_TSCINV; 578 if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0) 579 result |= CPU_AES; 580 if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0) 581 result |= CPU_ERMS; 582 if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0) 583 result |= CPU_CLMUL; 584 if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0) 585 result |= CPU_RTM; 586 if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0) 587 result |= CPU_ADX; 588 if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0) 589 result |= CPU_BMI2; 590 if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0) 591 result |= CPU_SHA; 592 if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0) 593 result |= CPU_FMA; 594 if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0) 595 result |= CPU_FLUSHOPT; 596 597 // 
AMD|Hygon features. 598 if (is_amd_family()) { 599 if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) || 600 (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0)) 601 result |= CPU_3DNOW_PREFETCH; 602 if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) 603 result |= CPU_LZCNT; 604 if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0) 605 result |= CPU_SSE4A; 606 } 607 // Intel features. 608 if (is_intel()) { 609 if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0) 610 result |= CPU_LZCNT; 611 // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw 612 if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) { 613 result |= CPU_3DNOW_PREFETCH; 614 } 615 if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) { 616 result |= CPU_CLWB; 617 } 618 } 619 620 // ZX features. 621 if (is_zx()) { 622 if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0) 623 result |= CPU_LZCNT; 624 // for ZX, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw 625 if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) { 626 result |= CPU_3DNOW_PREFETCH; 627 } 628 } 629 630 return result; 631 } 632 633 static bool os_supports_avx_vectors() { 634 bool retVal = false; 635 int nreg = 2 LP64_ONLY(+2); 636 if (supports_evex()) { 637 // Verify that OS save/restore all bits of EVEX registers 638 // during signal processing. 639 retVal = true; 640 for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register 641 if (_cpuid_info.zmm_save[i] != ymm_test_value()) { 642 retVal = false; 643 break; 644 } 645 } 646 } else if (supports_avx()) { 647 // Verify that OS save/restore all bits of AVX registers 648 // during signal processing. 
649 retVal = true; 650 for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register 651 if (_cpuid_info.ymm_save[i] != ymm_test_value()) { 652 retVal = false; 653 break; 654 } 655 } 656 // zmm_save will be set on a EVEX enabled machine even if we choose AVX code gen 657 if (retVal == false) { 658 // Verify that OS save/restore all bits of EVEX registers 659 // during signal processing. 660 retVal = true; 661 for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register 662 if (_cpuid_info.zmm_save[i] != ymm_test_value()) { 663 retVal = false; 664 break; 665 } 666 } 667 } 668 } 669 return retVal; 670 } 671 672 static void get_processor_features(); 673 674 public: 675 // Offsets for cpuid asm stub 676 static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); } 677 static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); } 678 static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); } 679 static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); } 680 static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); } 681 static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); } 682 static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); } 683 static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); } 684 static ByteSize ext_cpuid1E_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1E_eax); } 685 static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); } 686 static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); } 687 static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); } 688 static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); } 689 static ByteSize ymm_save_offset() { return 
byte_offset_of(CpuidInfo, ymm_save); } 690 static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); } 691 692 // The value used to check ymm register after signal handle 693 static int ymm_test_value() { return 0xCAFEBABE; } 694 695 static void get_cpu_info_wrapper(); 696 static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; } 697 static bool is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; } 698 static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; } 699 static address cpuinfo_cont_addr() { return _cpuinfo_cont_addr; } 700 701 static void clean_cpuFeatures() { _features = 0; } 702 static void set_avx_cpuFeatures() { _features = (CPU_SSE | CPU_SSE2 | CPU_AVX | CPU_VZEROUPPER ); } 703 static void set_evex_cpuFeatures() { _features = (CPU_AVX512F | CPU_SSE | CPU_SSE2 | CPU_VZEROUPPER ); } 704 705 706 // Initialization 707 static void initialize(); 708 709 // Override Abstract_VM_Version implementation 710 static void print_platform_virtualization_info(outputStream*); 711 712 // Override Abstract_VM_Version implementation 713 static bool use_biased_locking(); 714 715 // Asserts 716 static void assert_is_initialized() { 717 assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized"); 718 } 719 720 // 721 // Processor family: 722 // 3 - 386 723 // 4 - 486 724 // 5 - Pentium 725 // 6 - PentiumPro, Pentium II, Celeron, Xeon, Pentium III, Athlon, 726 // Pentium M, Core Solo, Core Duo, Core2 Duo 727 // family 6 model: 9, 13, 14, 15 728 // 0x0f - Pentium 4, Opteron 729 // 730 // Note: The cpu family should be used to select between 731 // instruction sequences which are valid on all Intel 732 // processors. Use the feature test functions below to 733 // determine whether a particular instruction is supported. 
  //
  static int  cpu_family()        { return _cpu;}
  static bool is_P6()             { return cpu_family() >= 6; }
  // Vendor checks compare the first dword of the cpuid-0 vendor string
  // (stored little-endian, hence the reversed character order in comments).
  static bool is_amd()            { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
  static bool is_hygon()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH'
  static bool is_amd_family()     { return is_amd() || is_hygon(); }
  static bool is_intel()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
  static bool is_zx()             { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS  '
  static bool is_atom_family()    { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
  static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi

  static bool supports_processor_topology() {
    return (_cpuid_info.std_max_function >= 0xB) &&
           // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
           // Some cpus have max cpuid >= 0xB but do not support processor topology.
           (((_cpuid_info.tpl_cpuidB0_eax & 0x1f) | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
  }

  // Physical cores per package.  Intel/ZX: ratio of leaf-0xB levels when
  // topology is supported, else leaf-4 count; AMD/Hygon: leaf 0x80000008.
  static uint cores_per_cpu() {
    uint result = 1;
    if (is_intel()) {
      bool supports_topology = supports_processor_topology();
      if (supports_topology) {
        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
      }
      if (!supports_topology || result == 0) {
        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
      }
    } else if (is_amd_family()) {
      result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
    } else if (is_zx()) {
      bool supports_topology = supports_processor_topology();
      if (supports_topology) {
        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
      }
      if (!supports_topology || result == 0) {
        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
      }
    }
    return result;
  }

  // Hardware threads per physical core; never returns 0.
  static uint threads_per_core() {
    uint result = 1;
    if (is_intel() && supports_processor_topology()) {
      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    } else if (is_zx() && supports_processor_topology()) {
      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
      if (cpu_family() >= 0x17) {
        // AMD family 17h (Zen) reports SMT via leaf 0x8000001E.
        result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
      } else {
        result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                 cores_per_cpu();
      }
    }
    return (result == 0 ? 1 : result);
  }

  static intx L1_line_size() {
    intx result = 0;
    if (is_intel()) {
      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
    } else if (is_amd_family()) {
      result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
    } else if (is_zx()) {
      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
    }
    if (result < 32) // not defined ?
      result = 32;   // 32 bytes by default on x86 and other x64
    return result;
  }

  static intx prefetch_data_size()  {
    return L1_line_size();
  }

  //
  // Feature identification
  //
  static bool supports_cpuid()    { return _features  != 0; }
  static bool supports_cmpxchg8() { return (_features & CPU_CX8) != 0; }
  static bool supports_cmov()     { return (_features & CPU_CMOV) != 0; }
  static bool supports_fxsr()     { return (_features & CPU_FXSR) != 0; }
  static bool supports_ht()       { return (_features & CPU_HT) != 0; }
  static bool supports_mmx()      { return (_features & CPU_MMX) != 0; }
  static bool supports_sse()      { return (_features & CPU_SSE) != 0; }
  static bool supports_sse2()     { return (_features & CPU_SSE2) != 0; }
  static bool supports_sse3()     { return (_features & CPU_SSE3) != 0; }
  static bool supports_ssse3()    { return (_features & CPU_SSSE3)!= 0; }
  static bool supports_sse4_1()   { return (_features & CPU_SSE4_1) != 0; }
  static bool supports_sse4_2()   { return (_features & CPU_SSE4_2) != 0; }
  static bool supports_popcnt()   { return (_features & CPU_POPCNT) != 0; }
  static bool supports_avx()      { return (_features & CPU_AVX) != 0; }
  static bool supports_avx2()     { return (_features & CPU_AVX2) != 0; }
  static bool supports_tsc()      { return (_features & CPU_TSC) != 0; }
  static bool supports_aes()      { return (_features & CPU_AES) != 0; }
  static bool supports_erms()     { return (_features & CPU_ERMS) != 0; }
  static bool supports_clmul()    { return (_features & CPU_CLMUL) != 0; }
  static bool supports_rtm()      { return (_features & CPU_RTM) != 0; }
  static bool supports_bmi1()     { return (_features & CPU_BMI1) != 0; }
  static bool supports_bmi2()     { return (_features & CPU_BMI2) != 0; }
  static bool supports_adx()      { return (_features & CPU_ADX) != 0; }
  static bool supports_evex()     { return (_features & CPU_AVX512F) != 0; }
  static bool supports_avx512dq() { return (_features & CPU_AVX512DQ) != 0; }
  static bool supports_avx512pf() { return (_features & CPU_AVX512PF) != 0; }
  static bool supports_avx512er() { return (_features & CPU_AVX512ER) != 0; }
  static bool supports_avx512cd() { return (_features & CPU_AVX512CD) != 0; }
  static bool supports_avx512bw() { return (_features & CPU_AVX512BW) != 0; }
  static bool supports_avx512vl() { return (_features & CPU_AVX512VL) != 0; }
  // Composite AVX-512 predicates used by the code generators.
  static bool supports_avx512vlbw() { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
  static bool supports_avx512vldq() { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
  static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
                                               supports_avx512bw() && supports_avx512dq()); }
  static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
  static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
  static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
  static bool supports_avxonly()    { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
  static bool supports_sha()        { return (_features & CPU_SHA) != 0; }
  static bool supports_fma()        { return (_features & CPU_FMA) != 0 && supports_avx(); }
  static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
  static bool supports_vpopcntdq()  { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
  static bool supports_vpclmulqdq() { return (_features & CPU_VPCLMULQDQ) != 0; }
  static bool supports_vaes()       { return (_features & CPU_VAES) != 0; }
  static bool supports_vnni()       { return (_features & CPU_VNNI) != 0; }

  // Intel features
  static bool is_intel_family_core() { return is_intel() &&
                                       extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

  static bool is_intel_tsc_synched_at_init()  {
    if (is_intel_family_core()) {
      uint32_t ext_model = extended_cpu_model();
      if (ext_model == CPU_MODEL_NEHALEM_EP     ||
          ext_model == CPU_MODEL_WESTMERE_EP    ||
          ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
          ext_model == CPU_MODEL_IVYBRIDGE_EP) {
        // <= 2-socket invariant tsc support. EX versions are usually used
        // in > 2-socket systems and likely don't synchronize tscs at
        // initialization.
        // Code that uses tsc values must be prepared for them to arbitrarily
        // jump forward or backward.
        return true;
      }
    }
    return false;
  }

  // AMD features
  static bool supports_3dnow_prefetch()    { return (_features & CPU_3DNOW_PREFETCH) != 0; }
  static bool supports_mmx_ext()  { return is_amd_family() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
  static bool supports_lzcnt()    { return (_features & CPU_LZCNT) != 0; }
  static bool supports_sse4a()    { return (_features & CPU_SSE4A) != 0; }

  static bool is_amd_Barcelona()  { return is_amd() &&
                                           extended_cpu_family() == CPU_FAMILY_AMD_11H; }

  // Intel and AMD newer cores support fast timestamps well
  static bool supports_tscinv_bit() {
    return (_features & CPU_TSCINV) != 0;
  }
  static bool supports_tscinv() {
    return supports_tscinv_bit() &&
           ((is_amd_family() && !is_amd_Barcelona()) ||
            is_intel_tsc_synched_at_init());
  }

  // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
  static bool has_fast_idiv()     { return is_intel() && cpu_family() == 6 &&
                                           supports_sse3() && _model != 0x1C; }

  static bool supports_compare_and_exchange() { return true; }

  // Heuristic allocation prefetch distance in bytes for the given CPU.
  static intx allocate_prefetch_distance(bool use_watermark_prefetch) {
    // Hardware prefetching (distance/size in bytes):
    // Pentium 3 -  64 /  32
    // Pentium 4 - 256 / 128
    // Athlon    -  64 /  32 ????
    // Opteron   - 128 /  64 only when 2 sequential cache lines accessed
    // Core      - 128 /  64
    //
    // Software prefetching (distance in bytes / instruction with best score):
    // Pentium 3 - 128 / prefetchnta
    // Pentium 4 - 512 / prefetchnta
    // Athlon    - 128 / prefetchnta
    // Opteron   - 256 / prefetchnta
    // Core      - 256 / prefetchnta
    // It will be used only when AllocatePrefetchStyle > 0

    if (is_amd_family()) { // AMD | Hygon
      if (supports_sse2()) {
        return 256; // Opteron
      } else {
        return 128; // Athlon
      }
    } else { // Intel
      if (supports_sse3() && cpu_family() == 6) {
        if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
          return 192;
        } else if (use_watermark_prefetch) { // watermark prefetching on Core
#ifdef _LP64
          return 384;
#else
          return 320;
#endif
        }
      }
      if (supports_sse2()) {
        if (cpu_family() == 6) {
          return 256; // Pentium M, Core, Core2
        } else {
          return 512; // Pentium 4
        }
      } else {
        return 128; // Pentium 3 (and all other old CPUs)
      }
    }
  }

  // SSE2 and later processors implement a 'pause' instruction
  // that can be used for efficient implementation of
  // the intrinsic for java.lang.Thread.onSpinWait()
  static bool supports_on_spin_wait() { return supports_sse2(); }

  // x86_64 supports fast class initialization checks for static methods.
  static bool supports_fast_class_init_checks() {
    return LP64_ONLY(true) NOT_LP64(false); // not implemented on x86_32
  }

  // there are several insns to force cache line sync to memory which
  // we can use to ensure mapped non-volatile memory is up to date with
  // pending in-cache changes.
  //
  // 64 bit cpus always support clflush which writes back and evicts
  // on 32 bit cpus support is recorded via a feature flag
  //
  // clflushopt is optional and acts like clflush except it does
  // not synchronize with other memory ops. it needs a preceding
  // and trailing StoreStore fence
  //
  // clwb is an optional, intel-specific instruction optional which
  // writes back without evicting the line. it also does not
  // synchronize with other memory ops. so, it also needs a preceding
  // and trailing StoreStore fence.

#ifdef _LP64
  static bool supports_clflush() {
    // clflush should always be available on x86_64
    // if not we are in real trouble because we rely on it
    // to flush the code cache.
    // Unfortunately, Assembler::clflush is currently called as part
    // of generation of the code cache flush routine. This happens
    // under Universe::init before the processor features are set
    // up. Assembler::flush calls this routine to check that clflush
    // is allowed. So, we give the caller a free pass if Universe init
    // is still in progress.
    assert ((!Universe::is_fully_initialized() || (_features & CPU_FLUSH) != 0), "clflush should be available");
    return true;
  }
  static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
  static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
#else
  static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); }
  static bool supports_clflushopt() { return false; }
  static bool supports_clwb() { return false; }
#endif // _LP64

  // support functions for virtualization detection
 private:
  static void check_virt_cpuid(uint32_t idx, uint32_t *regs);
  static void check_virtualizations();
};

#endif // CPU_X86_VM_VERSION_X86_HPP