/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
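    // Family is reported in bits 11:8 of cpuid(1) eax, so synthesizing
    // (3 << 8) or (4 << 8) below mimics that layout for chips where the
    // cpuid instruction itself is unavailable.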
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf(); // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa); // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // jump if SSE and YMM state are OS-enabled

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSs have a bug where the upper 128/256 bits of YMM/ZMM
    // registers are not restored after signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
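    // (The probe addresses recorded via set_cpuinfo_segv_addr() and
    // set_cpuinfo_cont_addr() below let the VM's signal handler recognize
    // this intentional fault and resume execution just past it.)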
    //
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx bit 16: avx512f
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

    // EVEX setup: run in lowest evex mode
    VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 3;
    UseSSE = 2;
    // load value into all 64 bytes of zmm7 register
    __ movl(rcx, VM_Version::ymm_test_value());
    __ movdl(xmm0, rcx);
    __ movl(rcx, 0xffff);
    __ kmovwl(k1, rcx);
    __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
    __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
    __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
    __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
    VM_Version::clean_cpuFeatures();
    __ jmp(save_restore_except);

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128h(xmm0, xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
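    // The avx512f and XCR0 tests below repeat the pre-fault checks so that
    // the save path (EVEX vs. legacy AVX) matches the setup path above.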

    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4));
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_save_restore);

    // EVEX check: run in lowest evex mode
    VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 3;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
    __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
    __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
    __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
    __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;
    __ jmp(wrapup);

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why the ICache line size is hard-coded in the ICache class;
  // see icache_x86.hpp.
  // It is also the reason why we can't use the clflush instruction in the
  // 32-bit VM, since it could be running on a CPU which does not support it.
  //
  // The only thing we can do is verify that the ICache::line_size
  // used for flushing has the correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // first try initial setting and detect what we can support
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      UseAVX = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      UseAVX = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      UseAVX = 1;
    } else {
      UseAVX = 0;
    }
  } else if (UseAVX < 0) {
    UseAVX = 0;
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1)
    _features &= ~CPU_AVX;

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _features &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx() ? ", mmx" : ""),
               (supports_sse() ? ", sse" : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3() ? ", ssse3" : ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx() ? ", avx" : ""),
               (supports_avx2() ? ", avx2" : ""),
               (supports_aes() ? ", aes" : ""),
               (supports_clmul() ? ", clmul" : ""),
               (supports_erms() ? ", erms" : ""),
               (supports_rtm() ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt() ? ", lzcnt" : ""),
               (supports_sse4a() ? ", sse4a" : ""),
               (supports_ht() ? ", ht" : ""),
               (supports_tsc() ? ", tsc" : ""),
               (supports_tscinv_bit() ? ", tscinvbit" : ""),
               (supports_tscinv() ? ", tscinv" : ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""));
  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.
  // I.e., you cannot set UseSSE to 2 on older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE = 4;
  if (UseSSE < 0) UseSSE = 0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3, UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2, UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1, UseSSE);
  if (!supports_sse()) // Drop to 0 if no SSE support
    UseSSE = 0;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require sse3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1() && UseSSE >= 4) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require sse4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }

  // Use CLMUL instructions if available.
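  // (CLMUL, carry-less multiplication, also gates the CRC32 and GHASH
  // intrinsics configured below.)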
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (supports_sha()) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    if (!FLAG_IS_DEFAULT(UseSHA))
      warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA && supports_sha()) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA && supports_sha()) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  // Adjust RTM (Restricted Transactional Memory) flags
  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
                                        "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 64) {
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
      // 32 bytes vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
#if defined(COMPILER2) && defined(ASSERT)
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif // COMPILER2 && ASSERT
  }
#endif // COMPILER2 || INCLUDE_JVMCI

#ifdef COMPILER2
#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if (is_amd()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2() && UseSSE >= 4) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. "
                "Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, the default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2() && UseSSE >= 4) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. "
                  "Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
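  // (popcnt support is reported by cpuid(1) ecx bit 23.)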
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) AllocatePrefetchInstr = 0;
  if (!supports_sse() && supports_3dnow_prefetch()) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (cache_line_size > AllocatePrefetchStepSize)
    AllocatePrefetchStepSize = cache_line_size;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
        FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
      }
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    tty->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      tty->print(" UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      tty->print(" UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      tty->print(" MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    tty->cr();
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      tty->print_cr(": no prefetching");
    } else {
      tty->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
tty->print("PREFETCHT0"); 1253 } else if (AllocatePrefetchInstr == 2) { 1254 tty->print("PREFETCHT2"); 1255 } else if (AllocatePrefetchInstr == 3) { 1256 tty->print("PREFETCHW"); 1257 } 1258 } 1259 if (AllocatePrefetchLines > 1) { 1260 tty->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize); 1261 } else { 1262 tty->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize); 1263 } 1264 } 1265 1266 if (PrefetchCopyIntervalInBytes > 0) { 1267 tty->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes); 1268 } 1269 if (PrefetchScanIntervalInBytes > 0) { 1270 tty->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes); 1271 } 1272 if (PrefetchFieldsAhead > 0) { 1273 tty->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead); 1274 } 1275 if (ContendedPaddingWidth > 0) { 1276 tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth); 1277 } 1278 } 1279 #endif // !PRODUCT 1280 } 1281 1282 bool VM_Version::use_biased_locking() { 1283 #if INCLUDE_RTM_OPT 1284 // RTM locking is most useful when there is high lock contention and 1285 // low data contention. With high lock contention the lock is usually 1286 // inflated and biased locking is not suitable for that case. 1287 // RTM locking code requires that biased locking is off. 1288 // Note: we can't switch off UseBiasedLocking in get_processor_features() 1289 // because it is used by Thread::allocate() which is called before 1290 // VM_Version::initialize(). 1291 if (UseRTMLocking && UseBiasedLocking) { 1292 if (FLAG_IS_DEFAULT(UseBiasedLocking)) { 1293 FLAG_SET_DEFAULT(UseBiasedLocking, false); 1294 } else { 1295 warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." ); 1296 UseBiasedLocking = false; 1297 } 1298 } 1299 #endif 1300 return UseBiasedLocking; 1301 } 1302 1303 void VM_Version::initialize() { 1304 ResourceMark rm; 1305 // Making this stub must be FIRST use of assembler 1306 1307 stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size); 1308 if (stub_blob == NULL) { 1309 vm_exit_during_initialization("Unable to allocate get_cpu_info_stub"); 1310 } 1311 CodeBuffer c(stub_blob); 1312 VM_Version_StubGenerator g(&c); 1313 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t, 1314 g.generate_get_cpu_info()); 1315 1316 get_processor_features(); 1317 }