/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/virtualizationSupport.hpp"

#include OS_HEADER_INLINE(os)

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 2000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
  typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
static detect_virt_stub_t detect_virt_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf(); // preserve rbx, and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);
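
    // For reference, the AC/ID probes above are the classic pre-CPUID
    // detection idiom. The ID-bit test can be sketched in C with inline
    // assembly (illustrative only, assuming a GCC/Clang-style toolchain;
    // this helper is not part of the generated stub):
    //
    //   static int cpu_has_cpuid(void) {
    //     unsigned long f1, f2;
    //     __asm__ volatile("pushf\n\t pushf\n\t pop %0\n\t"  // save, sample flags
    //                      "mov %0, %1\n\t xor %2, %0\n\t"   // flip the ID bit
    //                      "push %0\n\t popf\n\t"
    //                      "pushf\n\t pop %0\n\t popf"       // resample, restore
    //                      : "=&r"(f1), "=&r"(f2)
    //                      : "i"(0x00200000));               // EFLAGS.ID
    //     return ((f1 ^ f2) & 0x00200000) != 0;  // bit toggled => CPUID exists
    //   }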
    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
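
    // XCR0 layout relied on below (bit meanings per the Intel SDM; this is an
    // explanatory note, not generated code):
    //   bit 0: x87    bit 1: SSE/XMM    bit 2: AVX/YMM
    //   bit 5: opmask (k0-k7)   bit 6: ZMM0-15 high 256 bits   bit 7: ZMM16-31
    // So (xcr0 & 0x6) == 0x6 means the OS context-switches XMM and YMM state,
    // and (xcr0 & 0xE0) == 0xE0 means full AVX-512 state is enabled.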
    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jcc(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    __ cmpl(rax, 0x80000008);     // Is cpuid(0x80000009 and above) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid8);
    __ cmpl(rax, 0x8000001E);     // Is cpuid(0x8000001E) supported?
    __ jccb(Assembler::below, ext_cpuid8);
    //
    // Extended cpuid(0x8000001E)
    //
    __ movl(rax, 0x8000001E);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000008)
    //
    __ bind(ext_cpuid8);
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);
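
    // For reference, the extended-leaf dispatch above mirrors this C sketch
    // (illustrative only, using GCC's __get_cpuid from <cpuid.h>; it is not
    // part of the generated stub):
    //
    //   unsigned max_ext, a, b, c, d;
    //   __get_cpuid(0x80000000, &max_ext, &b, &c, &d);
    //   if (max_ext >= 0x80000001) { __get_cpuid(0x80000001, &a, &b, &c, &d); /* store */ }
    //   if (max_ext >= 0x80000005) { __get_cpuid(0x80000005, &a, &b, &c, &d); /* store */ }
    //   // ... and so on up to 0x8000001E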
    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // run the SIMD check if SSE and YMM state are OS-enabled

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSs have a bug where the upper 128/256 bits of YMM/ZMM
    // registers are not restored after signal processing.
    // Generate a SEGV here (reference through NULL)
    // and check the upper YMM/ZMM bits after it.
    //
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx bit 16: avx512f
      __ cmpl(rax, 0x10000);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654);              // If it is Skylake
        __ jcc(Assembler::equal, legacy_setup);
      }
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm5-xmm15 are not preserved by caller on windows
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
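
    // The NULL load above faults on purpose: the VM's signal handler
    // recognizes _cpuinfo_segv_addr, advances the PC to _cpuinfo_cont_addr,
    // and execution resumes here with whatever SIMD state the kernel
    // restored. Comparing the saved YMM/ZMM contents against
    // ymm_test_value() below exposes OSs that clobber the upper register
    // halves during signal handling.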

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4));
      __ cmpl(rax, 0x10000);
      __ jcc(Assembler::notEqual, legacy_save_restore);
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jcc(Assembler::notEqual, legacy_save_restore);

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654);              // If it is Skylake
        __ jcc(Assembler::equal, legacy_save_restore);
      }
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };

  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    // vzeroupper() will use a pre-computed instruction sequence that we
    // can't compute until after we've determined CPU capabilities. Use
    // the uncached variant here directly to be able to bootstrap correctly.
    __ vzeroupper_uncached();
#   undef __
  }

  address generate_detect_virt() {
    StubCodeMark mark(this, "VM_Version", "detect_virt_stub");
#   define __ _masm->

    address start = __ pc();

    // Save callee-saved registers
    __ push(rbp);
    __ push(rbx);
    __ push(rsi); // for Windows
#ifdef _LP64
    __ mov(rax, c_rarg0); // CPUID leaf
    __ mov(rsi, c_rarg1); // register array address (eax, ebx, ecx, edx)
#else
    __ movptr(rax, Address(rsp, 16)); // CPUID leaf
    __ movptr(rsi, Address(rsp, 20)); // register array address
#endif

    __ cpuid();

    // Store result to register array
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    // Epilogue
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why the Icache line size is hard coded in the ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use the
  // clflush instruction in the 32-bit VM, since it could be running
  // on a CPU which does not support it.
  //
  // The only thing we can do is to verify that the flushed
  // ICache::line_size has the correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
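  // Worked example: CPUID leaf 1, EBX bits 15:8 report the CLFLUSH line size
  // in 8-byte units, so the value 8 checked below corresponds to
  // 8 * 8 = 64-byte cache lines, matching the hard-coded ICache::line_size.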
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

#ifdef _LP64
  // assigning this field effectively enables Unsafe.writebackMemory()
  // by initing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
  // that is only implemented on x86_64 and only if the OS plays ball
  if (os::supports_map_sync()) {
    // publish data cache line flush size to generic field, otherwise
    // let it default to zero thereby disabling writeback
    _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
  }
#endif
  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // Since AVX instructions are slower than SSE on some ZX cpus, force UseAVX=0.
  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
    UseAVX = 0;
  }

  // first try initial setting and detect what we can support
  int use_avx_limit = 0;
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      use_avx_limit = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      use_avx_limit = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      use_avx_limit = 1;
    } else {
      use_avx_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseAVX)) {
    // Don't use AVX-512 on older Skylakes unless explicitly requested.
    if (use_avx_limit > 2 && is_intel_skylake() && _stepping < 5) {
      FLAG_SET_DEFAULT(UseAVX, 2);
    } else {
      FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
    }
  }
  if (UseAVX > use_avx_limit) {
    warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
  } else if (UseAVX < 0) {
    warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
    FLAG_SET_DEFAULT(UseAVX, 0);
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
    _features &= ~CPU_AVX512_VPOPCNTDQ;
    _features &= ~CPU_AVX512_VPCLMULQDQ;
    _features &= ~CPU_AVX512_VAES;
    _features &= ~CPU_AVX512_VNNI;
    _features &= ~CPU_AVX512_VBMI;
    _features &= ~CPU_AVX512_VBMI2;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1) {
    _features &= ~CPU_AVX;
    _features &= ~CPU_VZEROUPPER;
  }
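
  // Example: on a CPU with AVX2 but no AVX-512, use_avx_limit is 2, so a
  // command-line -XX:UseAVX=3 is clamped back to 2 with a warning, while a
  // default run simply starts at UseAVX=2 and the AVX-512 feature bits
  // above are stripped.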

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  if (is_intel()) { // Intel cpus specific settings
    if (is_knights_family()) {
      _features &= ~CPU_VZEROUPPER;
    }
  }

  if (FLAG_IS_DEFAULT(IntelJccErratumMitigation)) {
    _has_intel_jcc_erratum = compute_has_intel_jcc_erratum();
  } else {
    _has_intel_jcc_erratum = IntelJccErratumMitigation;
  }

  char buf[512];
  int res = jio_snprintf(buf, sizeof(buf),
              "(%u cores per cpu, %u threads per core) family %d model %d stepping %d microcode 0x%x"
              "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s",

              cores_per_cpu(), threads_per_core(),
              cpu_family(), _model, _stepping, os::cpu_microcode_revision(),

              (supports_cmov() ? ", cmov" : ""),
              (supports_cmpxchg8() ? ", cx8" : ""),
              (supports_fxsr() ? ", fxsr" : ""),
              (supports_mmx() ? ", mmx" : ""),
              (supports_sse() ? ", sse" : ""),
              (supports_sse2() ? ", sse2" : ""),
              (supports_sse3() ? ", sse3" : ""),
              (supports_ssse3()? ", ssse3": ""),
              (supports_sse4_1() ? ", sse4.1" : ""),
              (supports_sse4_2() ? ", sse4.2" : ""),

              (supports_popcnt() ? ", popcnt" : ""),
              (supports_vzeroupper() ? ", vzeroupper" : ""),
              (supports_avx() ? ", avx" : ""),
              (supports_avx2() ? ", avx2" : ""),
              (supports_aes() ? ", aes" : ""),
              (supports_clmul() ? ", clmul" : ""),
              (supports_erms() ? ", erms" : ""),
              (supports_rtm() ? ", rtm" : ""),
              (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
              (supports_lzcnt() ? ", lzcnt": ""),

              (supports_sse4a() ? ", sse4a": ""),
              (supports_ht() ? ", ht": ""),
              (supports_tsc() ? ", tsc": ""),
              (supports_tscinv_bit() ? ", tscinvbit": ""),
              (supports_tscinv() ? ", tscinv": ""),
              (supports_bmi1() ? ", bmi1" : ""),
              (supports_bmi2() ? ", bmi2" : ""),
              (supports_adx() ? ", adx" : ""),
              (supports_evex() ? ", avx512f" : ""),
              (supports_avx512dq() ? ", avx512dq" : ""),

              (supports_avx512pf() ? ", avx512pf" : ""),
              (supports_avx512er() ? ", avx512er" : ""),
              (supports_avx512cd() ? ", avx512cd" : ""),
              (supports_avx512bw() ? ", avx512bw" : ""),
              (supports_avx512vl() ? ", avx512vl" : ""),
              (supports_avx512_vpopcntdq() ? ", avx512_vpopcntdq" : ""),
              (supports_avx512_vpclmulqdq() ? ", avx512_vpclmulqdq" : ""),
              (supports_avx512_vbmi() ? ", avx512_vbmi" : ""),
              (supports_avx512_vbmi2() ? ", avx512_vbmi2" : ""),
              (supports_avx512_vaes() ? ", avx512_vaes" : ""),

              (supports_avx512_vnni() ? ", avx512_vnni" : ""),
              (supports_sha() ? ", sha" : ""),
              (supports_fma() ? ", fma" : ""),
              (supports_clflush() ? ", clflush" : ""),
              (supports_clflushopt() ? ", clflushopt" : ""),
              (supports_clwb() ? ", clwb" : ""));

  assert(res > 0, "not enough temporary space allocated"); // increase 'buf' size

  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires. I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  int use_sse_limit = 0;
  if (UseSSE > 0) {
    if (UseSSE > 3 && supports_sse4_1()) {
      use_sse_limit = 4;
    } else if (UseSSE > 2 && supports_sse3()) {
      use_sse_limit = 3;
    } else if (UseSSE > 1 && supports_sse2()) {
      use_sse_limit = 2;
    } else if (UseSSE > 0 && supports_sse()) {
      use_sse_limit = 1;
    } else {
      use_sse_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseSSE)) {
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE > use_sse_limit) {
    warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE < 0) {
    warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
    FLAG_SET_DEFAULT(UseSSE, 0);
  }

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require sse3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require sse4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }
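
  // Example cascade: running with -XX:-UseAES on AES-capable hardware also
  // turns off UseAESIntrinsics (warning if it was set explicitly), which in
  // turn turns off UseAESCTRIntrinsics, keeping the three flags consistent.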

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics require CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsics require CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  // Base64 intrinsics (check the conditions for which the intrinsic will be active)
  if ((UseAVX > 2) && supports_avx512vl() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
      UseBASE64Intrinsics = true;
    }
  } else if (UseBASE64Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
      warning("Base64 intrinsics require EVEX instructions on this CPU");
    FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) { // Check UseSSE since FMA code uses SSE instructions
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) {
    UseMD5Intrinsics = true;
  }

  if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

#ifdef _LP64
  // These are only supported on 64-bit
  if (UseSHA && supports_avx2() && supports_bmi2()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else
#endif
  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }
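
  // Example: UseSHA acts as a master switch. With SHA extensions and SSE4.1
  // present, UseSHA1Intrinsics and UseSHA256Intrinsics default to true; if
  // all three intrinsic flags end up false, UseSHA itself is switched back off.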

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_client_compilation_mode_vm()) {
      // Only C2 does RTM locking optimization.
      // Can't continue because UseRTMLocking affects UseBiasedLocking flag
      // setting during arguments processing. See use_biased_locking().
      vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
    }
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this "
                                        "platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

#if COMPILER2_OR_JVMCI
  int max_vector_size = 0;
  if (UseSSE < 2) {
    // Vectors (in XMM) are only supported with SSE2+
    // SSE is always 2 on x64.
    max_vector_size = 0;
  } else if (UseAVX == 0 || !os_supports_avx_vectors()) {
    // 16-byte vectors (in XMM) are supported with SSE2+
    max_vector_size = 16;
  } else if (UseAVX == 1 || UseAVX == 2) {
    // 32-byte vectors (in YMM) are only supported with AVX+
    max_vector_size = 32;
  } else if (UseAVX > 2) {
    // 64-byte vectors (in ZMM) are only supported with AVX 3
    max_vector_size = 64;
  }

#ifdef _LP64
  int min_vector_size = 4; // We require MaxVectorSize to be at least 4 on 64bit
#else
  int min_vector_size = 0;
#endif

  if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
    if (MaxVectorSize < min_vector_size) {
      warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
    }
    if (MaxVectorSize > max_vector_size) {
      warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2, setting to default: %i", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
  } else {
    // If default, use highest supported configuration
    FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
  }
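
  // Example: with AVX2 (UseAVX == 2), max_vector_size is 32, so
  // -XX:MaxVectorSize=64 is clamped to 32 and -XX:MaxVectorSize=24 is
  // rejected as a non-power-of-2 and reset to the 32-byte default.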

#if defined(COMPILER2) && defined(ASSERT)
  if (MaxVectorSize > 0) {
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handle:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
  }
#endif // COMPILER2 && ASSERT

  if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
    if (!is_power_of_2(AVX3Threshold)) {
      warning("AVX3Threshold must be a power of 2");
      FLAG_SET_DEFAULT(AVX3Threshold, 4096);
    }
  }

#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif // _LP64
#endif // COMPILER2_OR_JVMCI

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm)
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm)


  if (is_zx()) { // ZX cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on ZX cpus
    }
    if ((cpu_family() == 6) || (cpu_family() == 7)) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all ZX cpus
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new ZX cpus apply the following optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in the current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new ZX cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
      }
      if (supports_sse4_2()) { // new ZX cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }

    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  if (is_amd_family()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
      }
    }

#ifdef COMPILER2
    if (cpu_family() < 0x17 && MaxVectorSize > 16) {
      // Limit vector size to 16 bytes on AMD cpus < 17h.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2

    // Some defaults for AMD family 17h || Hygon family 18h
    if (cpu_family() == 0x17 || cpu_family() == 0x18) {
      // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
      }
#ifdef COMPILER2
      if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
#endif
    }
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus apply the following optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in the current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if (is_atom_family() || is_knights_family()) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (FLAG_IS_DEFAULT(UseIncDec)) {
        FLAG_SET_DEFAULT(UseIncDec, false);
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use the count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use the count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }
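
  // Note: tzcnt encodes as "rep bsf", so on pre-BMI1 hardware it silently
  // executes as plain bsf with different zero-input semantics; that is why
  // tzcnt is only emitted when BMI1 is present and not explicitly disabled
  // on the command line.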

  // Use the population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

  // Use XMM/YMM MOVDQU instructions for Object Initialization
  if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
    if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
      UseXMMForObjInit = true;
    }
  } else if (UseXMMForObjInit) {
    warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
    FLAG_SET_DEFAULT(UseXMMForObjInit, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
  if (FLAG_IS_DEFAULT(OptimizeFill)) {
    // 8247307: On x86, the auto-vectorized loop array fill code shows
    // better performance than the array fill stubs. We should reenable
    // this after the x86 stubs get improved.
    OptimizeFill = false;
  }
#endif // COMPILER2

  if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
    if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
    } else if (!supports_sse() && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
      (cache_line_size > AllocatePrefetchStepSize)) {
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
  }

  if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
    assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
    if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
      warning("AllocatePrefetchDistance is set to 0 which disables prefetching. Ignoring AllocatePrefetchStyle flag.");
    }
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
  }

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
        supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
    }
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }
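
  // Example: on a CPU reporting a 64-byte prefetch data size, the default
  // AllocatePrefetchStepSize is raised to 64, and AllocatePrefetchDistance
  // comes from allocate_prefetch_distance(), using the watermark variant
  // when AllocatePrefetchStyle == 2.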

#ifdef _LP64
  // Prefetch settings

  // Prefetch interval for gc copy/scan == 9 dcache lines.  Derived from
  // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
  // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
  // 256 bytes (4 dcache lines) was the nearest runner-up to 576.

  // gc copy/scan is disabled if prefetchw isn't supported, because
  // Prefetch::write emits an inlined prefetchw on Linux.
  // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
  // The used prefetcht0 instruction works for both amd64 and em64t.

  if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchFieldsAhead)) {
    FLAG_SET_DEFAULT(PrefetchFieldsAhead, 1);
  }
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    LogStream ls(Log(os, cpu)::info());
    outputStream* log = &ls;
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print("  UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print("  UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print("  MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}
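
// Background note: hypervisors advertise themselves through CPUID leaves
// starting at 0x40000000, which return a vendor signature (e.g. "KVMKVMKVM",
// "Microsoft Hv", "VMwareVMware"). The detect_virt_stub generated above reads
// such leaves, and get_detected_virtualization() maps the signature to the
// VirtualizationType reported below.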

void VM_Version::print_platform_virtualization_info(outputStream* st) {
  VirtualizationType vrt = VM_Version::get_detected_virtualization();
  if (vrt == XenHVM) {
    st->print_cr("Xen hardware-assisted virtualization detected");
  } else if (vrt == KVM) {
    st->print_cr("KVM virtualization detected");
  } else if (vrt == VMWare) {
    st->print_cr("VMWare virtualization detected");
    VirtualizationSupport::print_virtualization_info(st);
  } else if (vrt == HyperV) {
    st->print_cr("Hyper-V virtualization detected");
  } else if (vrt == HyperVRole) {
    st->print_cr("Hyper-V role detected");
  }
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention.  With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}
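
// Background, summarized from the Intel document linked below: on affected
// parts, jump instructions (including macro-fused compare-and-jump pairs)
// that cross or end on a 32-byte boundary can behave unpredictably, and
// Intel's microcode mitigation excludes such jumps from the decoded icache
// at a performance cost. When this predicate returns true, code emission
// elsewhere in the x86 backend pads instructions so that branches steer
// clear of 32-byte boundaries.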

bool VM_Version::compute_has_intel_jcc_erratum() {
  if (!is_intel_family_core()) {
    // Only Intel CPUs are affected.
    return false;
  }
  // The following table of affected CPUs is based on the following document released by Intel:
  // https://www.intel.com/content/dam/support/us/en/documents/processors/mitigations-jump-conditional-code-erratum.pdf
  switch (_model) {
  case 0x8E:
    // 06_8EH | 9 | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Amber Lake Y
    // 06_8EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake U
    // 06_8EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake U 23e
    // 06_8EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake Y
    // 06_8EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake U43e
    // 06_8EH | B | 8th Generation Intel® Core™ Processors based on microarchitecture code name Whiskey Lake U
    // 06_8EH | C | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Amber Lake Y
    // 06_8EH | C | 10th Generation Intel® Core™ Processor Family based on microarchitecture code name Comet Lake U42
    // 06_8EH | C | 8th Generation Intel® Core™ Processors based on microarchitecture code name Whiskey Lake U
    return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xC;
  case 0x4E:
    // 06_4E  | 3 | 6th Generation Intel® Core™ Processors based on microarchitecture code name Skylake U
    // 06_4E  | 3 | 6th Generation Intel® Core™ Processor Family based on microarchitecture code name Skylake U23e
    // 06_4E  | 3 | 6th Generation Intel® Core™ Processors based on microarchitecture code name Skylake Y
    return _stepping == 0x3;
  case 0x55:
    // 06_55H | 4 | Intel® Xeon® Processor D Family based on microarchitecture code name Skylake D, Bakerville
    // 06_55H | 4 | Intel® Xeon® Scalable Processors based on microarchitecture code name Skylake Server
    // 06_55H | 4 | Intel® Xeon® Processor W Family based on microarchitecture code name Skylake W
    // 06_55H | 4 | Intel® Core™ X-series Processors based on microarchitecture code name Skylake X
    // 06_55H | 4 | Intel® Xeon® Processor E3 v5 Family based on microarchitecture code name Skylake Xeon E3
    // 06_55  | 7 | 2nd Generation Intel® Xeon® Scalable Processors based on microarchitecture code name Cascade Lake (server)
    return _stepping == 0x4 || _stepping == 0x7;
  case 0x5E:
    // 06_5E  | 3 | 6th Generation Intel® Core™ Processor Family based on microarchitecture code name Skylake H
    // 06_5E  | 3 | 6th Generation Intel® Core™ Processor Family based on microarchitecture code name Skylake S
    return _stepping == 0x3;
  case 0x9E:
    // 06_9EH | 9 | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake G
    // 06_9EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake H
    // 06_9EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake S
    // 06_9EH | 9 | Intel® Core™ X-series Processors based on microarchitecture code name Kaby Lake X
    // 06_9EH | 9 | Intel® Xeon® Processor E3 v6 Family Kaby Lake Xeon E3
    // 06_9EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake H
    // 06_9EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S
    // 06_9EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S (6+2) x/KBP
    // 06_9EH | A | Intel® Xeon® Processor E Family based on microarchitecture code name Coffee Lake S (6+2)
    // 06_9EH | A | Intel® Xeon® Processor E Family based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | B | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | B | Intel® Celeron® Processor G Series based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | D | 9th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake H (8+2)
    // 06_9EH | D | 9th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S (8+2)
    return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xD;
  case 0xA6:
    // 06_A6H | 0 | 10th Generation Intel® Core™ Processor Family based on microarchitecture code name Comet Lake U62
    return _stepping == 0x0;
  case 0xAE:
    // 06_AEH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake Refresh U (4+2)
    return _stepping == 0xA;
  default:
    // If we are running on another Intel machine not recognized in the table, we are okay.
    return false;
  }
}
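
// For reference: the _model and _stepping values tested above are decoded
// from CPUID leaf 1 EAX in the standard way -- stepping is bits [3:0], and
// for family 6 the model combines the base model bits [7:4] with the
// extended-model bits [19:16] as (ext_model << 4) + model, which is how
// values such as 0x8E and 0x9E arise.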

// On Xen, the cpuid instruction returns
//  eax / registers[0]: Version of Xen
//  ebx / registers[1]: chars 'XenV'
//  ecx / registers[2]: chars 'MMXe'
//  edx / registers[3]: chars 'nVMM'
//
// On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
//  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
//  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
//  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
//
// more information :
//  https://kb.vmware.com/s/article/1009458
//
void VM_Version::check_virtualizations() {
  uint32_t registers[4] = {0};
  char signature[13] = {0};

  // In the case of Xen, the hypervisor leaves may appear at any 0x100-aligned
  // boundary up to 0x40010000, so probe the whole range.
  // https://lists.linuxfoundation.org/pipermail/virtualization/2012-May/019974.html
  for (int leaf = 0x40000000; leaf < 0x40010000; leaf += 0x100) {
    detect_virt_stub(leaf, registers);
    memcpy(signature, &registers[1], 12);

    if (strncmp("VMwareVMware", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = VMWare;
      // check for extended metrics from guestlib
      VirtualizationSupport::initialize();
    } else if (strncmp("Microsoft Hv", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = HyperV;
#ifdef _WINDOWS
      // CPUID leaf 0x40000007 is available to the root partition only.
      // See Hypervisor Top Level Functional Specification section 2.4.8 for more details.
      // https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/master/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v6.0b.pdf
      detect_virt_stub(0x40000007, registers);
      if ((registers[0] != 0x0) ||
          (registers[1] != 0x0) ||
          (registers[2] != 0x0) ||
          (registers[3] != 0x0)) {
        Abstract_VM_Version::_detected_virtualization = HyperVRole;
      }
#endif
    } else if (strncmp("KVMKVMKVM", signature, 9) == 0) {
      Abstract_VM_Version::_detected_virtualization = KVM;
    } else if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = XenHVM;
    }
  }
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler
  stub_blob = BufferBlob::create("VM_Version stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate stub for VM_Version");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);

  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());
  detect_virt_stub = CAST_TO_FN_PTR(detect_virt_stub_t,
                                    g.generate_detect_virt());

  get_processor_features();

  LP64_ONLY(Assembler::precompute_instructions();)

  if (VM_Version::supports_hv()) { // Supports hypervisor
    check_virtualizations();
  }
}
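
// For illustration, the detect_virt stub generated above is consumed as in
// check_virtualizations(): it executes cpuid with the requested leaf in eax
// and stores eax..edx into the caller's four-element array.
//
//   uint32_t regs[4] = {0};
//   detect_virt_stub(0x40000000, regs);  // base hypervisor leaf
//   // regs[1..3] now hold the 12-byte vendor signature; regs[0] is
//   // leaf-specific (see the table before check_virtualizations()).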