/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
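
    // How the constants above are used, roughly: EFLAGS bit 18 (AC,
    // 0x40000) is writable only on 486 and later, and bit 21 (ID,
    // 0x200000) is writable only on CPUs that implement CPUID.
    // Toggling each bit through pushf/popf and reading it back
    // therefore separates 386 / 486 / CPUID-capable parts; see the
    // probes below.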
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);
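
    // A note on the topology probe below: CPUID leaf 0xB is invoked
    // with ECX selecting the level (0 is used here for SMT/threads,
    // 1 for cores, 2 for packages); a level is treated as valid only
    // if EAX[4:0] (the shift count) or EBX[15:0] (the logical
    // processor count) is non-zero.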
    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
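
    // XGETBV with ECX = 0 returns the XCR0 register in EDX:EAX.
    // Only the low word matters here: bit 1 = SSE state, bit 2 =
    // AVX (YMM) state, bits 5..7 = opmask/ZMM state saved by the OS
    // on context switch; these bits are tested further down.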
    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // continue if the OS saves SSE and YMM state

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);
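
    // (jccb emits a short jump with an 8-bit displacement, so targets
    // more than 127 bytes away are out of range; the "done" island
    // above stays within reach of the early jccb exits and forwards
    // to wrapup with a near jmp.)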
    __ bind(start_simd_check);
    //
    // Some OSs have a bug when upper 128/256bits of YMM/ZMM
    // registers are not restored after a signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
    //
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4)); // avx512f bit in sef_cpuid7 ebx
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm5-xmm15 are not preserved by caller on windows
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ movl(rcx, 0xffff);
      __ kmovwl(k1, rcx);
      __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
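
    // The fault above is fielded by the platform signal handler: it
    // recognizes the faulting PC as _cpuinfo_segv_addr and resumes
    // execution at _cpuinfo_cont_addr (see the os_*_x86 files), so
    // control reappears here with whatever register state the OS
    // restored. The stores below capture that state for inspection.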

    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4));
    __ cmpl(rax, 0x10000);
    __ jcc(Assembler::notEqual, legacy_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jcc(Assembler::notEqual, legacy_save_restore);

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };

  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    __ vzeroupper();
#   undef __
  }
};
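
// Note on generate_vzeroupper() above: vzeroupper is emitted to avoid
// AVX-SSE transition penalties, but it is reportedly expensive on the
// Xeon Phi (Knights Landing class) cores, so the helper skips it when
// the GenuineIntel family/model bits match those parts.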

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // clflush instruction in 32-bit VM since it could be running
  // on CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // first try initial setting and detect what we can support
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      UseAVX = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      UseAVX = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      UseAVX = 1;
    } else {
      UseAVX = 0;
    }
  } else if (UseAVX < 0) {
    UseAVX = 0;
  }
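
  // Net effect of the clamping above (sketch): a request of 3 or more
  // resolves to the highest of {EVEX -> 3, AVX2 -> 2, AVX -> 1, 0}
  // that the hardware supports; a negative request is treated as 0.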

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1) {
    _features &= ~CPU_AVX;
    _features &= ~CPU_VZEROUPPER;
  }

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _features &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  if (is_intel()) { // Intel cpus specific settings
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x57) ||   // Xeon Phi 3200/5200/7200
         (extended_cpu_model() == 0x85))) {  // Future Xeon Phi
      _features &= ~CPU_VZEROUPPER;
      if (FLAG_IS_DEFAULT(UseIncDec)) {
        FLAG_SET_DEFAULT(UseIncDec, false);
      }
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
               (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""),
               (supports_fma() ? ", fma" : ""));
  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE support
    UseSSE = 0;
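
  // Worked example: -XX:UseSSE=4 on a CPU with SSE3 but no SSE4.1
  // first drops to MIN2(3, 4) = 3 and then stays at 3, since SSE3
  // is supported; the same request on an SSE2-only CPU ends at 2.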

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require sse3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require sse4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }
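
  // The AES flags form a dependency chain: UseAESIntrinsics requires
  // UseAES (plus SSE3+), and UseAESCTRIntrinsics additionally requires
  // UseAESIntrinsics (plus SSE4.1+); disabling a link disables
  // everything downstream, with a warning where the user asked
  // for it explicitly.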

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) {
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (supports_sha() LP64_ONLY(|| (supports_avx2() && supports_bmi2()))) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }
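
  // Note: on 64-bit the UseSHA default above can be true without the
  // SHA extensions, because the SHA-256/512 stubs can also be built
  // from AVX2+BMI2 code; only UseSHA1Intrinsics insists on the real
  // SHA instruction set (see the supports_sha() checks above).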

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_client_compilation_mode_vm()) {
      // Only C2 does RTM locking optimization.
      // Can't continue because UseRTMLocking affects UseBiasedLocking flag
      // setting during arguments processing. See use_biased_locking().
      vm_exit_during_initialization("RTM locking optimization is not supported in emulated client VM");
    }
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

#if defined(COMPILER2) || INCLUDE_JVMCI
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      if (MaxVectorSize > 0) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be 0");
        FLAG_SET_DEFAULT(MaxVectorSize, 0);
      }
    } else if (UseAVX == 0 || !os_supports_avx_vectors()) {
      // 32 bytes vectors (in YMM) are only supported with AVX+
      if (MaxVectorSize > 16) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 16");
        FLAG_SET_DEFAULT(MaxVectorSize, 16);
      }
    } else if (UseAVX == 1 || UseAVX == 2) {
      // 64 bytes vectors (in ZMM) are only supported with AVX 3
      if (MaxVectorSize > 32) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 32");
        FLAG_SET_DEFAULT(MaxVectorSize, 32);
      }
    } else if (UseAVX > 2) {
      if (MaxVectorSize > 64) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 64");
        FLAG_SET_DEFAULT(MaxVectorSize, 64);
      }
    }
#if defined(COMPILER2) && defined(ASSERT)
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handle:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif // COMPILER2 && ASSERT
  }
#endif // COMPILER2 || INCLUDE_JVMCI
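
  // Summary of the MaxVectorSize limits applied above: no SSE2 -> 0,
  // no usable AVX -> at most 16 bytes (XMM), AVX1/2 -> at most 32
  // bytes (YMM), AVX-512 -> at most 64 bytes (ZMM).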

#ifdef COMPILER2
#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if (is_amd()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (FLAG_IS_DEFAULT(UseIncDec)) {
        FLAG_SET_DEFAULT(UseIncDec, false);
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }
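
  // Separate flags exist for lzcnt/tzcnt because, unlike bsr/bsf,
  // they produce a well-defined result (the operand width) for a
  // zero input, which the compiler can exploit when generating
  // branch-free bit-counting code.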

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) AllocatePrefetchInstr = 0;
  if (!supports_sse() && supports_3dnow_prefetch()) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (cache_line_size > AllocatePrefetchStepSize)
    AllocatePrefetchStepSize = cache_line_size;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
        FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
      }
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
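
  // The block below only reports the decisions made above; it is
  // compiled out of product builds and prints when unified logging
  // for os+cpu is enabled at Info level (roughly -Xlog:os+cpu=info).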

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    outputStream* log = Log(os, cpu)::info_stream();
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print(" UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print(" UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print(" MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention.  With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
}