/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
uint64_t VM_Version::_cpuFeatures;
const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
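
    // The pre-CPUID probes below use the classic EFLAGS-toggle technique:
    // AC is EFLAGS bit 18 (0x40000) and ID is EFLAGS bit 21 (0x200000).
    // A 386 cannot toggle AC at all, and a chip that cannot toggle ID has
    // no CPUID instruction (an early 486); the pushf/popf round-trips
    // below observe whether a toggle attempt stuck.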

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve rbx and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);
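
    // cpuid leaf 0xB enumerates the topology one level per sub-leaf (rcx):
    // sub-leaf 0 describes the SMT (thread) level and sub-leaf 1 the core
    // level; this code also records sub-leaf 2 as the package level.
    // ebx[15:0] is the number of logical processors at the level and
    // eax[4:0] the APIC-id shift; both being zero marks an invalid
    // sub-leaf, which is what the masked or-tests below detect.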

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
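
    // Note: xgetbv with rcx = 0 reads XCR0 into edx:eax. In XCR0, bit 0 is
    // x87 state, bit 1 SSE (XMM) state and bit 2 AVX (YMM) state, so the
    // 0x6 mask tested further down means "the OS saves both XMM and YMM";
    // bits 5-7 (the 0xE0 mask) cover the AVX-512 opmask and ZMM state.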

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);   // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);   // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);   // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);   // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // proceed if the OS saves full AVX state

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSs have a bug when upper 128/256bits of YMM/ZMM
    // registers are not restored after a signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
    //
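    // Note: UseAVX/UseSSE and VM_Version::set_*_cpuFeatures() are bumped
    // around the emission below only because the assembler asserts that the
    // corresponding features are enabled before it will emit AVX/EVEX
    // encodings; the saved values are restored once emission is done.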
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx: avx512f bit
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

    // EVEX setup: run in lowest evex mode
    VM_Version::set_evex_cpuFeatures(); // Enabled temporarily to pass asserts
    UseAVX = 3;
    UseSSE = 2;
    // load value into all 64 bytes of zmm7 register
    __ movl(rcx, VM_Version::ymm_test_value());
    __ movdl(xmm0, rcx);
    __ movl(rcx, 0xffff);
#ifdef _LP64
    __ kmovql(k1, rcx);
#else
    __ kmovdl(k1, rcx);
#endif
    __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
    __ evmovdqu(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
    __ evmovdqu(xmm8, xmm0, Assembler::AVX_512bit);
    __ evmovdqu(xmm31, xmm0, Assembler::AVX_512bit);
#endif
    VM_Version::clean_cpuFeatures();
    __ jmp(save_restore_except);

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enabled temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128h(xmm0, xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
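
    // The deliberate fault above is fielded by the platform signal handler
    // (see the os_*_x86 files), which recognizes the faulting pc via
    // VM_Version::is_cpuinfo_segv_addr() and resumes execution at
    // _cpuinfo_cont_addr. The register images stored below are compared
    // against ymm_test_value() later to decide whether the OS preserved
    // the upper YMM/ZMM bits across the signal.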

    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4));
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_save_restore);

    // EVEX check: run in lowest evex mode
    VM_Version::set_evex_cpuFeatures(); // Enabled temporarily to pass asserts
    UseAVX = 3;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
    __ evmovdqu(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
    __ evmovdqu(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
    __ evmovdqu(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
    __ evmovdqu(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;
    __ jmp(wrapup);

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enabled temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};
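
// The blob generated above is installed as get_cpu_info_stub by
// VM_Version::initialize() at the bottom of this file and is called exactly
// once, with the address of the static _cpuid_info record as its argument.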

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _cpuFeatures = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  if (!Use486InstrsOnly) {
    // Get raw processor info

    get_cpu_info_stub(&_cpuid_info);

    assert_is_initialized();
    _cpu = extended_cpu_family();
    _model = extended_cpu_model();
    _stepping = cpu_stepping();

    if (cpu_family() > 4) { // it supports CPUID
      _cpuFeatures = feature_flags();
      // Logical processors are only available on P4s and above,
      // and only if hyperthreading is available.
      _logical_processors_per_package = logical_processor_count();
      _L1_data_cache_line_size = L1_line_size();
    }
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // clflush instruction in 32-bit VM since it could be running
  // on CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is reported in quadwords (8 bytes), so 8 means a 64-byte cache line.
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _cpuFeatures &= ~CPU_SSE4_1;
    _cpuFeatures &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _cpuFeatures &= ~CPU_SSE3;
    _cpuFeatures &= ~CPU_SSSE3;
    _cpuFeatures &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _cpuFeatures &= ~CPU_SSE2;

  if (UseSSE < 1)
    _cpuFeatures &= ~CPU_SSE;

  // first try initial setting and detect what we can support
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      UseAVX = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      UseAVX = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      UseAVX = 1;
    } else {
      UseAVX = 0;
    }
  } else if (UseAVX < 0) {
    UseAVX = 0;
  }

  if (UseAVX < 3) {
    _cpuFeatures &= ~CPU_AVX512F;
    _cpuFeatures &= ~CPU_AVX512DQ;
    _cpuFeatures &= ~CPU_AVX512CD;
    _cpuFeatures &= ~CPU_AVX512BW;
    _cpuFeatures &= ~CPU_AVX512VL;
  }

  if (UseAVX < 2)
    _cpuFeatures &= ~CPU_AVX2;

  if (UseAVX < 1)
    _cpuFeatures &= ~CPU_AVX;

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _cpuFeatures &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
               (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""));
  _features_str = os::strdup(buf);
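
  // The assembled string reads like, for example (values illustrative only):
  //   "(4 cores per cpu, 2 threads per core) family 6 model 60 stepping 3,
  //    cmov, cx8, fxsr, mmx, sse, sse2, sse3, ssse3, sse4.1, sse4.2, ..."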

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires, i.e. you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE support
    UseSSE = 0;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics require CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support (of course)
  // but also require SSE3 mode for the instructions they use.
  if (UseAES && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }
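
  // Note: unlike the feature-gated flags above, the SHA, CRC32C and Adler32
  // intrinsic flags are rejected unconditionally, presumably because this
  // version of the x86 port provides no stubs for them; no CPU-feature
  // check is even attempted.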

  // Adjust RTM (Restricted Transactional Memory) flags
  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif
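
  // (The model/stepping tests above single out early Haswell and Broadwell
  // parts whose TSX implementation was subject to published errata; on
  // those, RTM is only exposed behind -XX:+UnlockExperimentalVMOptions.)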

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 64) {
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
      // 32 bytes vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
#ifdef ASSERT
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif
  }

#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On newer cpus, instructions which update the whole XMM register should
  // be used to prevent partial register stalls due to dependencies on the
  // high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).
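
  // The distinction matters because movsd(xmm, mem), movaps and movapd
  // write the full destination register and so break any dependency on its
  // old contents, while movlpd(xmm, mem), movss(xmm, xmm) and
  // movsd(xmm, xmm) merge into the low bits and keep the stale upper half
  // live.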

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if( supports_sse4a() ) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) {
      if( supports_sse4_2() && UseSSE >= 4 ) {
        UseSSE42Intrinsics = true;
      }
    }

    // some defaults for AMD family 15h
    if ( cpu_family() == 0x15 ) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }
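
  // (The 16-byte cap reflects that AMD cpus of this era execute 256-bit
  // AVX operations as two 128-bit halves, so wider vectors were not a win.)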

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus do the following optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in the current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2() && UseSSE >= 4) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          UseSSE42Intrinsics = true;
        }
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }
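
  // "Fast-string" below refers to ERMS (Enhanced REP MOVSB/STOSB), reported
  // in CPUID.(EAX=07H,ECX=0):EBX bit 9 and surfaced as supports_erms();
  // UseFastStosb lets the VM block-fill memory with rep stosb.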

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");

  // set valid Prefetch instruction
  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
  if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
  if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr = 0;
  if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;

  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 )     // set valid value in product VM
    AllocatePrefetchLines = 3;
  assert(AllocateInstancePrefetchLines > 0, "invalid value");
  if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM
    AllocateInstancePrefetchLines = 1;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      AllocatePrefetchLines = 4;
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }
  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
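
  // (UseUnalignedAccesses governs Unsafe-based accesses; x86 handles
  // misaligned loads and stores in hardware, so enabling it by default is
  // safe on any CPU that reaches this point.)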
tty->print(" prefetching: "); 1173 if (UseSSE == 0 && supports_3dnow_prefetch()) { 1174 tty->print("PREFETCHW"); 1175 } else if (UseSSE >= 1) { 1176 if (AllocatePrefetchInstr == 0) { 1177 tty->print("PREFETCHNTA"); 1178 } else if (AllocatePrefetchInstr == 1) { 1179 tty->print("PREFETCHT0"); 1180 } else if (AllocatePrefetchInstr == 2) { 1181 tty->print("PREFETCHT2"); 1182 } else if (AllocatePrefetchInstr == 3) { 1183 tty->print("PREFETCHW"); 1184 } 1185 } 1186 if (AllocatePrefetchLines > 1) { 1187 tty->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize); 1188 } else { 1189 tty->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize); 1190 } 1191 } 1192 1193 if (PrefetchCopyIntervalInBytes > 0) { 1194 tty->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes); 1195 } 1196 if (PrefetchScanIntervalInBytes > 0) { 1197 tty->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes); 1198 } 1199 if (PrefetchFieldsAhead > 0) { 1200 tty->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead); 1201 } 1202 if (ContendedPaddingWidth > 0) { 1203 tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth); 1204 } 1205 } 1206 #endif // !PRODUCT 1207 } 1208 1209 bool VM_Version::use_biased_locking() { 1210 #if INCLUDE_RTM_OPT 1211 // RTM locking is most useful when there is high lock contention and 1212 // low data contention. With high lock contention the lock is usually 1213 // inflated and biased locking is not suitable for that case. 1214 // RTM locking code requires that biased locking is off. 1215 // Note: we can't switch off UseBiasedLocking in get_processor_features() 1216 // because it is used by Thread::allocate() which is called before 1217 // VM_Version::initialize(). 1218 if (UseRTMLocking && UseBiasedLocking) { 1219 if (FLAG_IS_DEFAULT(UseBiasedLocking)) { 1220 FLAG_SET_DEFAULT(UseBiasedLocking, false); 1221 } else { 1222 warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." ); 1223 UseBiasedLocking = false; 1224 } 1225 } 1226 #endif 1227 return UseBiasedLocking; 1228 } 1229 1230 void VM_Version::initialize() { 1231 ResourceMark rm; 1232 // Making this stub must be FIRST use of assembler 1233 1234 stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size); 1235 if (stub_blob == NULL) { 1236 vm_exit_during_initialization("Unable to allocate get_cpu_info_stub"); 1237 } 1238 CodeBuffer c(stub_blob); 1239 VM_Version_StubGenerator g(&c); 1240 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t, 1241 g.generate_get_cpu_info()); 1242 1243 get_processor_features(); 1244 }