/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
int VM_Version::_cpuFeatures;
const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 600;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
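    // A CPU that cannot toggle EFLAGS.AC (bit 18, 0x40000) is a 386;
    // one that can toggle AC but not EFLAGS.ID (bit 21, 0x200000) is a
    // 486 without CPUID. Only when both bits are writable is it safe
    // to execute the cpuid instruction below.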
    const int CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve rbx, and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);   // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    __ andl(rax, 0x6); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // Some OSes fail to restore the upper 128 bits of YMM registers
    // after signal handling. Generate a SEGV here (load through NULL)
    // and check the upper YMM bits after the signal returns.
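    // The register contents saved below are compared against
    // ymm_test_value() later (see os_supports_avx_vectors()); any
    // mismatch means the OS clobbers YMM state and AVX vectors stay off.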
    //
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    UseAVX = 1;
    UseSSE = 2;

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128h(xmm0, xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif

    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);      // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);      // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);      // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);      // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // return
    //
    __ bind(done);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};


void VM_Version::get_cpu_info_wrapper() {
  get_cpu_info_stub(&_cpuid_info);
}

#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
#endif

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _cpuFeatures = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  if (!Use486InstrsOnly) {
    // Get raw processor info

    // Some platforms (like Win*) need a wrapper here in order to
    // properly handle the SEGV raised by the YMM register test.
    CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);

    assert_is_initialized();
    _cpu = extended_cpu_family();
    _model = extended_cpu_model();
    _stepping = cpu_stepping();

    if (cpu_family() > 4) { // it supports CPUID
      _cpuFeatures = feature_flags();
      // Logical processors are only available on P4s and above,
      // and only if hyperthreading is available.
      _logical_processors_per_package = logical_processor_count();
      _L1_data_cache_line_size = L1_line_size();
    }
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why the Icache line size is hard coded in the ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use the
  // clflush instruction in the 32-bit VM, since it could be running
  // on a CPU which does not support it.
  //
  // The only thing we can do is to verify that the flushed
  // ICache::line_size has the correct value.
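  // CPUID leaf 1 reports CLFLUSH support in EDX bit 19, and EBX bits
  // 15:8 give the CLFLUSH line size in quadwords: the value 8 checked
  // below corresponds to the 64-byte line size that ICache hard-codes.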
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _cpuFeatures &= ~CPU_SSE4_1;
    _cpuFeatures &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _cpuFeatures &= ~CPU_SSE3;
    _cpuFeatures &= ~CPU_SSSE3;
    _cpuFeatures &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _cpuFeatures &= ~CPU_SSE2;

  if (UseSSE < 1)
    _cpuFeatures &= ~CPU_SSE;

  if (UseAVX < 2)
    _cpuFeatures &= ~CPU_AVX2;

  if (UseAVX < 1)
    _cpuFeatures &= ~CPU_AVX;

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _cpuFeatures &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx() ? ", mmx" : ""),
               (supports_sse() ? ", sse" : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3() ? ", ssse3" : ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx() ? ", avx" : ""),
               (supports_avx2() ? ", avx2" : ""),
               (supports_aes() ? ", aes" : ""),
               (supports_clmul() ? ", clmul" : ""),
               (supports_erms() ? ", erms" : ""),
               (supports_rtm() ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt() ? ", lzcnt" : ""),
               (supports_sse4a() ? ", sse4a" : ""),
               (supports_ht() ? ", ht" : ""),
               (supports_tsc() ? ", tsc" : ""),
               (supports_tscinv_bit() ? ", tscinvbit" : ""),
               (supports_tscinv() ? ", tscinv" : ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""));
  _features_str = strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires. I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
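  // For example, -XX:UseSSE=4 on a CPU that reports only SSE3 is
  // clamped to 3 by the first check below.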
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE support
    UseSSE = 0;

  if (UseAVX > 2) UseAVX=2;
  if (UseAVX < 0) UseAVX=0;
  if (!supports_avx2()) // Drop to 1 if no AVX2 support
    UseAVX = MIN2((intx)1,UseAVX);
  if (!supports_avx ()) // Drop to 0 if no AVX support
    UseAVX = 0;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support (of course)
  // but also require SSE3 mode for the instructions they use.
  if (UseAES && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }
  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
    warning("SHA intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  // Adjust RTM (Restricted Transactional Memory) flags
  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        if (!UnlockExperimentalVMOptions) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
    if (MaxVectorSize > 32) {
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
      // 32 byte vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
#ifdef ASSERT
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif
  }

#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On new cpus, instructions which update the whole XMM register should
  // be used to prevent partial register stalls due to dependencies on
  // the high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if (is_amd()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
      if (supports_sse4_2() && UseSSE >= 4) {
        UseSSE42Intrinsics = true;
      }
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vector size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in the current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2() && UseSSE >= 4) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          UseSSE42Intrinsics = true;
        }
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
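    // That is, force vector alignment only when the unaligned
    // load/store path is not in use.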
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");

  // set valid Prefetch instruction
  if (ReadPrefetchInstr < 0) ReadPrefetchInstr = 0;
  if (ReadPrefetchInstr > 3) ReadPrefetchInstr = 3;
  if (ReadPrefetchInstr == 3 && !supports_3dnow_prefetch()) ReadPrefetchInstr = 0;
  if (!supports_sse() && supports_3dnow_prefetch()) ReadPrefetchInstr = 3;

  if (AllocatePrefetchInstr < 0) AllocatePrefetchInstr = 0;
  if (AllocatePrefetchInstr > 3) AllocatePrefetchInstr = 3;
  if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) AllocatePrefetchInstr = 0;
  if (!supports_sse() && supports_3dnow_prefetch()) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (cache_line_size > AllocatePrefetchStepSize)
    AllocatePrefetchStepSize = cache_line_size;

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) // set valid value in product VM
    AllocatePrefetchLines = 3;
  assert(AllocateInstancePrefetchLines > 0, "invalid value");
  if (AllocateInstancePrefetchLines < 1) // set valid value in product VM
    AllocateInstancePrefetchLines = 1;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      AllocatePrefetchLines = 4;
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }
  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    tty->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      tty->print(" UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      tty->print(" UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      tty->print(" MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    tty->cr();
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      tty->print_cr(": no prefetching");
    } else {
      tty->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          tty->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          tty->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          tty->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

void VM_Version::print_platform_virtualization_info(outputStream* st) {
  VirtualizationType vrt = VM_Version::get_detected_virtualization();
  if (vrt == XenHVM) {
    st->print_cr("Xen hardware-assisted virtualization detected");
  } else if (vrt == KVM) {
    st->print_cr("KVM virtualization detected");
  } else if (vrt == VMWare) {
    st->print_cr("VMWare virtualization detected");
  } else if (vrt == HyperV) {
    st->print_cr("HyperV virtualization detected");
  }
}

void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
  // TODO support 32 bit
#if defined(_LP64)
#if defined(_MSC_VER)
  // Allocate space for the code
  const int code_size = 100;
  ResourceMark rm;
  CodeBuffer cb("detect_virt", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);
  address code = a->pc();
  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;

  a->movq(r9, rbx); // save nonvolatile register

  // next line would not work on 32-bit
  a->movq(rax, c_rarg0 /* rcx */);
  a->movq(r8, c_rarg1 /* rdx */);
  a->cpuid();
  a->movl(Address(r8, 0), rax);
  a->movl(Address(r8, 4), rbx);
  a->movl(Address(r8, 8), rcx);
  a->movl(Address(r8, 12), rdx);

  a->movq(rbx, r9); // restore nonvolatile register
  a->ret(0);

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // execute code
  (*test)(idx, regs);
#elif defined(__GNUC__)
  __asm__ volatile (
     " cpuid;"
     " mov %%eax,(%1);"
     " mov %%ebx,4(%1);"
     " mov %%ecx,8(%1);"
     " mov %%edx,12(%1);"
    : "+a" (idx)
    : "S" (regs)
    : "ebx", "ecx", "edx", "memory" );
#endif
#endif
}


bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

// On Xen, the cpuid instruction returns
//  eax / registers[0]: Version of Xen
//  ebx / registers[1]: chars 'XenV'
//  ecx / registers[2]: chars 'MMXe'
//  edx / registers[3]: chars 'nVMM'
//
// On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
//  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
//  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
//  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
//
// more information:
//  https://kb.vmware.com/s/article/1009458
//
void VM_Version::check_virtualizations() {
#if defined(_LP64)
  uint32_t registers[4];
  char signature[13];
  uint32_t base;
  signature[12] = '\0';
  memset((void*)registers, 0, 4*sizeof(uint32_t));

  for (base = 0x40000000; base < 0x40010000; base += 0x100) {
    check_virt_cpuid(base, registers);

    *(uint32_t *)(signature + 0) = registers[1];
    *(uint32_t *)(signature + 4) = registers[2];
    *(uint32_t *)(signature + 8) = registers[3];

    if (strncmp("VMwareVMware", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = VMWare;
    }

    if (strncmp("Microsoft Hv", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = HyperV;
    }

    if (strncmp("KVMKVMKVM", signature, 9) == 0) {
      Abstract_VM_Version::_detected_virtualization = KVM;
    }

    if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = XenHVM;
    }
  }
#endif
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
  if (cpu_family() > 4) { // it supports CPUID
    check_virtualizations();
  }
}