/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
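    // CPUID(1) reports the family in EAX bits 11:8; the constants below
    // synthesize that same encoding for pre-CPUID parts so later code can
    // read cpu_family() the same way in both cases.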
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf(); // preserve rbx, and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa); // Is cpuid(0xB) supported?
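    // (rax still holds the maximum standard leaf returned by cpuid(0);
    // topology leaf 0xB is queried only when that maximum is at least 0xB.)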
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // proceed if OS has enabled SSE and YMM state

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSs have a bug when upper 128/256bits of YMM/ZMM
    // registers are not restored after a signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
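    // (The register contents written below are re-checked after the signal;
    // see the os_supports_avx_vectors() test consulted when sizing
    // MaxVectorSize in get_processor_features().)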
    //
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx: avx512f bit
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if ((saved_useavx == 99) || (saved_useavx > 2)) {
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm5-xmm15 are not preserved by caller on windows
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ movl(rcx, 0xffff);
      __ kmovwl(k1, rcx);
      __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
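    // (The VM's SEGV handler recognizes _cpuinfo_segv_addr and resumes at
    // _cpuinfo_cont_addr, so the faulting load above forces a full round
    // trip through OS signal handling while YMM/ZMM state is live.)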

    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4));
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_save_restore);

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if ((saved_useavx == 99) || (saved_useavx > 2)) {
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu      = extended_cpu_family();
  _model    = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // clflush instruction in 32-bit VM since it could be running
  // on CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // first try initial setting and detect what we can support
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      UseAVX = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      UseAVX = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      UseAVX = 1;
    } else {
      UseAVX = 0;
    }
  } else if (UseAVX < 0) {
    UseAVX = 0;
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1)
    _features &= ~CPU_AVX;

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _features &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx() ? ", mmx" : ""),
               (supports_sse() ? ", sse" : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx() ? ", avx" : ""),
               (supports_avx2() ? ", avx2" : ""),
               (supports_aes() ? ", aes" : ""),
               (supports_clmul() ? ", clmul" : ""),
               (supports_erms() ? ", erms" : ""),
               (supports_rtm() ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt() ? ", lzcnt": ""),
               (supports_sse4a() ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""),
               (supports_fma() ? ", fma" : ""));
  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires, i.e. you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE support
    UseSSE = 0;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require sse3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require sse4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) {
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 64) {
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
      // 32 bytes vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
#if defined(COMPILER2) && defined(ASSERT)
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handle:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif // COMPILER2 && ASSERT
  }
#endif // COMPILER2 || INCLUDE_JVMCI

#ifdef COMPILER2
#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On newer cpus, instructions which update the whole XMM register should be
  // used to prevent partial register stalls due to dependencies on the high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if (is_amd()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
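      // (Contemporary AMD parts typically split 256-bit AVX operations into
      // two 128-bit halves internally, so wider vectors rarely pay off.)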
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus apply the following optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in the current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use the count-leading-zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use the count-trailing-zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
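  // (POPCNT availability is reported by CPUID(1) ECX bit 23, which the stub
  // above captured and supports_popcnt() exposes.)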
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) AllocatePrefetchInstr = 0;
  if (!supports_sse() && supports_3dnow_prefetch()) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (cache_line_size > AllocatePrefetchStepSize)
    AllocatePrefetchStepSize = cache_line_size;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
        FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
      }
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    outputStream* log = Log(os, cpu)::info_stream();
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print(" UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print(" UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print(" MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
}