/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
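
    // Note on the probes below: before CPUID existed, CPU generation was
    // detected by toggling bits in EFLAGS.  The AC (alignment check) bit
    // cannot be set on a 386, and the ID bit can be toggled only on CPUs
    // that implement CPUID.  An illustrative C sketch of the same logic
    // (read_eflags/write_eflags standing in for the pushf/popf pairs):
    //
    //   uint32_t e = read_eflags();
    //   write_eflags(e ^ HS_EFL_AC);
    //   if (read_eflags() == e) return FAMILY_386;  // AC stuck -> 386
    //   write_eflags(e ^ HS_EFL_ID);
    //   if (read_eflags() == e) return FAMILY_486;  // ID stuck -> no CPUID
    //   // otherwise the CPUID instruction is available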

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);
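
    // Leaf 0xB enumerates the processor topology one level per ECX subleaf
    // (0 = SMT/thread, 1 = core, 2 = package).  A level is valid only if
    // EAX[4:0] (the APIC-id shift) or EBX[15:0] (logical processor count)
    // is non-zero, which is what the eax/ebx tests below check before each
    // subleaf's output is kept.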

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);
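
    // Leaf 7 is subleaf-indexed: ECX must be zeroed to select subleaf 0,
    // whose EBX carries the AVX2/BMI/AVX-512 feature bits consumed later.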
    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // jump if OS has enabled SSE and YMM state

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSes have a bug where the upper 128/256 bits of YMM/ZMM
    // registers are not restored after signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
    //
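    // The probe below deliberately loads known values into YMM/ZMM
    // registers, faults on a NULL load, and expects the platform signal
    // handler to recognize the faulting pc recorded via
    // set_cpuinfo_segv_addr() and resume at the continuation address (the
    // two addresses declared at the top of this file).  If the OS
    // mishandles extended state, the upper register bits read back after
    // the signal will differ from ymm_test_value().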
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx bit 16: avx512f
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

    // EVEX setup: run in lowest evex mode
    VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 3;
    UseSSE = 2;
    // load value into all 64 bytes of zmm7 register
    __ movl(rcx, VM_Version::ymm_test_value());
    __ movdl(xmm0, rcx);
    __ movl(rcx, 0xffff);
    __ kmovwl(k1, rcx);
    __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
    __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
    __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
    __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
    VM_Version::clean_cpuFeatures();
    __ jmp(save_restore_except);

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
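    // The stub falls through here once the signal handler has skipped the
    // faulting load.  The register contents saved below are compared
    // against ymm_test_value() afterwards (see os_supports_avx_vectors())
    // to decide whether the OS really preserves upper YMM/ZMM state across
    // signals.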

    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4));
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_save_restore);

    // EVEX check: run in lowest evex mode
    VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 3;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
    __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
    __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
    __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
    __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;
    __ jmp(wrapup);

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif
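
  // The AMD64-only block below relies on CPUID.01H:EBX[15:8], which reports
  // the CLFLUSH line size in 8-byte units; a value of 8 therefore means
  // 64-byte cache lines, matching the line size hard-coded in icache_x86.hpp.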
#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // clflush instruction in 32-bit VM since it could be running
  // on CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // first try initial setting and detect what we can support
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      UseAVX = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      UseAVX = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      UseAVX = 1;
    } else {
      UseAVX = 0;
    }
  } else if (UseAVX < 0) {
    UseAVX = 0;
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1)
    _features &= ~CPU_AVX;

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _features &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
               (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""));
  _features_string = os::strdup(buf);
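
  // For reference, on a hypothetical quad-core part with SMT the string
  // built above would look roughly like:
  //   (4 cores per cpu, 2 threads per core) family 6 model 60 stepping 3,
  //   cmov, cx8, fxsr, mmx, sse, sse2, ..., avx, avx2, aes, clmul
  // (values illustrative only).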

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE support
    UseSSE = 0;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require SSE3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require SSE4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }
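
  // Most of the feature sections below follow the same gating idiom:
  //
  //   if (supports_X()) {
  //     if (FLAG_IS_DEFAULT(UseX)) FLAG_SET_DEFAULT(UseX, true);
  //   } else if (UseX) {
  //     if (!FLAG_IS_DEFAULT(UseX)) warning("...not available on this CPU");
  //     FLAG_SET_DEFAULT(UseX, false);
  //   }
  //
  // so an explicit -XX:+UseX on unsupported hardware warns once and is
  // reset, while an untouched default is silently corrected.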

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && supports_sse4_1()) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsics require CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (supports_sha() LP64_ONLY(|| (supports_avx2() && supports_bmi2()))) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }
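
  // Note: RTM here refers to Intel TSX restricted transactional memory,
  // introduced with Haswell.  The model/stepping checks below exist because
  // early Haswell/Broadwell steppings shipped with a TSX erratum, so RTM is
  // treated as experimental on those parts.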

  // Adjust RTM (Restricted Transactional Memory) flags
  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 64) {
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
      // 32 bytes vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
#if defined(COMPILER2) && defined(ASSERT)
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handle:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif // COMPILER2 && ASSERT
  }
#endif // COMPILER2 || INCLUDE_JVMCI

#ifdef COMPILER2
#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).
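  //
  // Rationale: movsd(xmm, mem) zeroes bits 64..127 of the destination and
  // thus starts a fresh dependency chain, while movlpd merges into the old
  // upper half and keeps a dependency on the previous register contents;
  // the same full-register vs. partial-register trade-off applies to the
  // movaps/movapd vs. movss/movsd pairs.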

  if (is_amd()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
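  // (lzcnt and tzcnt, by contrast, reuse the bsr/bsf opcodes with an F3
  // prefix, so on CPUs that lack them the same bytes silently execute as
  // plain bsr/bsf with different semantics; that is why both are gated on
  // explicit CPUID feature bits above rather than probed at run time.)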
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) AllocatePrefetchInstr = 0;
  if (!supports_sse() && supports_3dnow_prefetch()) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (cache_line_size > AllocatePrefetchStepSize)
    AllocatePrefetchStepSize = cache_line_size;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
        FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
      }
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    outputStream* log = Log(os, cpu)::info_stream();
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
log->print("UseSSE=%d", (int) UseSSE); 1233 if (UseAVX > 0) { 1234 log->print(" UseAVX=%d", (int) UseAVX); 1235 } 1236 if (UseAES) { 1237 log->print(" UseAES=1"); 1238 } 1239 #ifdef COMPILER2 1240 if (MaxVectorSize > 0) { 1241 log->print(" MaxVectorSize=%d", (int) MaxVectorSize); 1242 } 1243 #endif 1244 log->cr(); 1245 log->print("Allocation"); 1246 if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) { 1247 log->print_cr(": no prefetching"); 1248 } else { 1249 log->print(" prefetching: "); 1250 if (UseSSE == 0 && supports_3dnow_prefetch()) { 1251 log->print("PREFETCHW"); 1252 } else if (UseSSE >= 1) { 1253 if (AllocatePrefetchInstr == 0) { 1254 log->print("PREFETCHNTA"); 1255 } else if (AllocatePrefetchInstr == 1) { 1256 log->print("PREFETCHT0"); 1257 } else if (AllocatePrefetchInstr == 2) { 1258 log->print("PREFETCHT2"); 1259 } else if (AllocatePrefetchInstr == 3) { 1260 log->print("PREFETCHW"); 1261 } 1262 } 1263 if (AllocatePrefetchLines > 1) { 1264 log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize); 1265 } else { 1266 log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize); 1267 } 1268 } 1269 1270 if (PrefetchCopyIntervalInBytes > 0) { 1271 log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes); 1272 } 1273 if (PrefetchScanIntervalInBytes > 0) { 1274 log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes); 1275 } 1276 if (PrefetchFieldsAhead > 0) { 1277 log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead); 1278 } 1279 if (ContendedPaddingWidth > 0) { 1280 log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth); 1281 } 1282 } 1283 #endif // !PRODUCT 1284 } 1285 1286 bool VM_Version::use_biased_locking() { 1287 #if INCLUDE_RTM_OPT 1288 // RTM locking is most useful when there is high lock contention and 1289 // low data contention. With high lock contention the lock is usually 1290 // inflated and biased locking is not suitable for that case. 1291 // RTM locking code requires that biased locking is off. 1292 // Note: we can't switch off UseBiasedLocking in get_processor_features() 1293 // because it is used by Thread::allocate() which is called before 1294 // VM_Version::initialize(). 1295 if (UseRTMLocking && UseBiasedLocking) { 1296 if (FLAG_IS_DEFAULT(UseBiasedLocking)) { 1297 FLAG_SET_DEFAULT(UseBiasedLocking, false); 1298 } else { 1299 warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." ); 1300 UseBiasedLocking = false; 1301 } 1302 } 1303 #endif 1304 return UseBiasedLocking; 1305 } 1306 1307 void VM_Version::initialize() { 1308 ResourceMark rm; 1309 // Making this stub must be FIRST use of assembler 1310 1311 stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size); 1312 if (stub_blob == NULL) { 1313 vm_exit_during_initialization("Unable to allocate get_cpu_info_stub"); 1314 } 1315 CodeBuffer c(stub_blob); 1316 VM_Version_StubGenerator g(&c); 1317 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t, 1318 g.generate_get_cpu_info()); 1319 1320 get_processor_features(); 1321 }