/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/virtualizationSupport.hpp"

#include OS_HEADER_INLINE(os)

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1100;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
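    //
    // A quick sketch of the pre-CPUID probes below, which rely on two EFLAGS
    // bits (a well-known x86 detection idiom, not specific to HotSpot):
    //   AC (bit 18, 0x40000):  writable on a 486 and later, always 0 on a 386.
    //   ID (bit 21, 0x200000): writable only if the CPUID instruction exists.
    // Each probe is roughly:
    //   pushf / pop rax     ; read EFLAGS
    //   xor rax, FLAG       ; try to flip the bit
    //   push rax / popf     ; write EFLAGS back
    //   pushf / pop rax     ; re-read EFLAGS
    // If the bit did not stick, the corresponding feature is absent.
    //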
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve rbx and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
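    // For reference, XCR0 describes which register state the OS saves and
    // restores on a context switch. The bits tested later in this stub:
    //   bit 1 (0x02): SSE (XMM) state        bit 5 (0x20): AVX-512 opmask
    //   bit 2 (0x04): AVX (upper YMM) state  bit 6 (0x40): ZMM0-15 upper state
    //                                        bit 7 (0x80): ZMM16-31 state
    // so mask 0x6 means the OS handles AVX state, and 0xE0 means it also
    // handles the additional EVEX state.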

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
    __ jcc(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    __ cmpl(rax, 0x80000008); // Is cpuid(0x80000009 and above) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid8);
    __ cmpl(rax, 0x8000001E); // Is cpuid(0x8000001E) supported?
    __ jccb(Assembler::below, ext_cpuid8);
    //
    // Extended cpuid(0x8000001E)
    //
    __ movl(rax, 0x8000001E);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000008)
    //
    __ bind(ext_cpuid8);
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // jump if the OS supports AVX state

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

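    // In outline, the SIMD check below proceeds in three steps:
    //   1. fill YMM/ZMM registers with VM_Version::ymm_test_value(),
    //   2. take a deliberate SEGV (a read through NULL) so the OS signal
    //      machinery saves and restores the extended register state, and
    //   3. store the registers afterwards, so that the caller can verify
    //      (see os_supports_avx_vectors()) that the upper bits survived.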
    __ bind(start_simd_check);
    //
    // Some OSes have a bug: the upper 128/256 bits of YMM/ZMM registers are
    // not restored after signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
    //
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4)); // cpuid7 ebx bit 16: avx512f
      __ cmpl(rax, 0x10000);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654); // If it is Skylake
        __ jcc(Assembler::equal, legacy_setup);
      }
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm5-xmm15 are not preserved by caller on windows
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4));
      __ cmpl(rax, 0x10000);
      __ jcc(Assembler::notEqual, legacy_save_restore);
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jcc(Assembler::notEqual, legacy_save_restore);

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654); // If it is Skylake
        __ jcc(Assembler::equal, legacy_save_restore);
      }
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };

  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    __ vzeroupper();
#   undef __
  }
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // the clflush instruction in the 32-bit VM, since it could be running
  // on a CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

#ifdef _LP64
  // assigning this field effectively enables Unsafe.writebackMemory()
  // by initializing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
  // that is only implemented on x86_64 and only if the OS plays ball
  if (os::supports_map_sync()) {
    // publish data cache line flush size to generic field, otherwise
    // let it default to zero thereby disabling writeback
    _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
  }
#endif
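  // Note on units: std_cpuid1_ebx.bits.clflush_size counts 8-byte quadwords,
  // so the value 8 verified above corresponds to 8 * 8 = 64-byte cache lines,
  // which is also the flush size published to _data_cache_line_flush_size.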
  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // Since AVX instructions are slower than SSE on some ZX cpus, force UseAVX=0.
  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
    UseAVX = 0;
  }

  // first try initial setting and detect what we can support
  int use_avx_limit = 0;
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      use_avx_limit = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      use_avx_limit = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      use_avx_limit = 1;
    } else {
      use_avx_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseAVX)) {
    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
    if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
      FLAG_SET_DEFAULT(UseAVX, 2);  // Set UseAVX=2 for Skylake
    }
  } else if (UseAVX > use_avx_limit) {
    warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
  } else if (UseAVX < 0) {
    warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
    FLAG_SET_DEFAULT(UseAVX, 0);
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
    _features &= ~CPU_AVX512_VPOPCNTDQ;
    _features &= ~CPU_AVX512_VPCLMULQDQ;
    _features &= ~CPU_VAES;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1) {
    _features &= ~CPU_AVX;
    _features &= ~CPU_VZEROUPPER;
  }

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  if (is_intel()) { // Intel cpus specific settings
    if (is_knights_family()) {
      _features &= ~CPU_VZEROUPPER;
    }
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx() ? ", mmx" : ""),
               (supports_sse() ? ", sse" : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3() ? ", ssse3" : ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx() ? ", avx" : ""),
               (supports_avx2() ? ", avx2" : ""),
               (supports_aes() ? ", aes" : ""),
               (supports_clmul() ? ", clmul" : ""),
               (supports_erms() ? ", erms" : ""),
               (supports_rtm() ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt() ? ", lzcnt" : ""),
               (supports_sse4a() ? ", sse4a" : ""),
               (supports_ht() ? ", ht" : ""),
               (supports_tsc() ? ", tsc" : ""),
               (supports_tscinv_bit() ? ", tscinvbit" : ""),
               (supports_tscinv() ? ", tscinv" : ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""),
               (supports_fma() ? ", fma" : ""),
               (supports_vbmi2() ? ", vbmi2" : ""),
               (supports_vaes() ? ", vaes" : ""),
               (supports_vnni() ? ", vnni" : ""));
  _features_string = os::strdup(buf);
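  // For example, -XX:UseSSE=4 on a CPU that only supports SSE3 is clamped to
  // use_sse_limit = 3 with a warning below, mirroring the UseAVX clamping above.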

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  int use_sse_limit = 0;
  if (UseSSE > 0) {
    if (UseSSE > 3 && supports_sse4_1()) {
      use_sse_limit = 4;
    } else if (UseSSE > 2 && supports_sse3()) {
      use_sse_limit = 3;
    } else if (UseSSE > 1 && supports_sse2()) {
      use_sse_limit = 2;
    } else if (UseSSE > 0 && supports_sse()) {
      use_sse_limit = 1;
    } else {
      use_sse_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseSSE)) {
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE > use_sse_limit) {
    warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE < 0) {
    warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
    FLAG_SET_DEFAULT(UseSSE, 0);
  }

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require sse3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require sse4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }
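  // To summarize the AES logic above, the flags form a dependency chain:
  //   UseAES              requires the AES instructions themselves,
  //   UseAESIntrinsics    additionally requires UseAES and SSE3 or higher,
  //   UseAESCTRIntrinsics additionally requires UseAESIntrinsics and SSE4.1.
  // Disabling a link in the chain disables everything after it.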

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics require CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsics require CLMUL and SSE2 instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  // Base64 intrinsics (check the conditions under which the intrinsic will be active)
  if ((UseAVX > 2) && supports_avx512vl() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
      UseBASE64Intrinsics = true;
    }
  } else if (UseBASE64Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
      warning("Base64 intrinsics require EVEX instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) { // Check UseSSE since FMA code uses SSE instructions
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (supports_sha() LP64_ONLY(|| (supports_avx2() && supports_bmi2()))) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

#ifdef _LP64
  // These are only supported on 64-bit
  if (UseSHA && supports_avx2() && supports_bmi2()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else
#endif
  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

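  // To recap the SHA gating above: UseSHA enables the family as a whole,
  // per-algorithm flags gate the individual stubs (SHA-1 and SHA-256 need
  // SSE4.1; SHA-512 needs AVX2 and BMI2 and is 64-bit only), and if no
  // per-algorithm intrinsic remains enabled, UseSHA itself is cleared below.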
  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_client_compilation_mode_vm()) {
      // Only C2 does RTM locking optimization.
      // Can't continue because UseRTMLocking affects UseBiasedLocking flag
      // setting during arguments processing. See use_biased_locking().
      vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
    }
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this "
                                        "platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

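  // The MaxVectorSize selection below maps the effective UseSSE/UseAVX levels
  // onto the widest usable register file: 16-byte XMM vectors with SSE2+,
  // 32-byte YMM vectors with AVX/AVX2, and 64-byte ZMM vectors with AVX-512.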
#if COMPILER2_OR_JVMCI
  int max_vector_size = 0;
  if (UseSSE < 2) {
    // Vectors (in XMM) are only supported with SSE2+
    // SSE is always 2 on x64.
    max_vector_size = 0;
  } else if (UseAVX == 0 || !os_supports_avx_vectors()) {
    // 16-byte vectors (in XMM) are supported with SSE2+
    max_vector_size = 16;
  } else if (UseAVX == 1 || UseAVX == 2) {
    // 32-byte vectors (in YMM) are only supported with AVX+
    max_vector_size = 32;
  } else if (UseAVX > 2) {
    // 64-byte vectors (in ZMM) are only supported with AVX 3
    max_vector_size = 64;
  }

#ifdef _LP64
  int min_vector_size = 4; // We require MaxVectorSize to be at least 4 on 64bit
#else
  int min_vector_size = 0;
#endif

  if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
    if (MaxVectorSize < min_vector_size) {
      warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
    }
    if (MaxVectorSize > max_vector_size) {
      warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2, setting to default: %i", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
  } else {
    // If default, use highest supported configuration
    FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
  }

#if defined(COMPILER2) && defined(ASSERT)
  if (MaxVectorSize > 0) {
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
  }
#endif // COMPILER2 && ASSERT

  if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
    if (!is_power_of_2(AVX3Threshold)) {
      warning("AVX3Threshold must be a power of 2");
      FLAG_SET_DEFAULT(AVX3Threshold, 4096);
    }
  }

#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif // _LP64
#endif // COMPILER2_OR_JVMCI

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  //   UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  //   UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  //   UseXmmRegToRegMoveAll   == true  --> movaps(xmm, xmm), movapd(xmm, xmm)
  //   UseXmmRegToRegMoveAll   == false --> movss(xmm, xmm),  movsd(xmm, xmm)
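  //
  // For example, movlpd writes only the low 64 bits of an XMM register, so the
  // result depends on the stale upper half and must wait for it; movsd from
  // memory rewrites the whole register and breaks that false dependency.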

  if (is_zx()) { // ZX cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on ZX cpus
    }
    if ((cpu_family() == 6) || (cpu_family() == 7)) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all ZX cpus
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new ZX cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new ZX cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
      }
      if (supports_sse4_2()) { // new ZX cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }

    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  if (is_amd_family()) { // AMD cpus specific settings
    if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse4a()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2F)) {
      if (supports_sse4a()) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmI2D)) {
      if (supports_sse4a()) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if (cpu_family() == 0x15) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
      }
    }

#ifdef COMPILER2
    if (cpu_family() < 0x17 && MaxVectorSize > 16) {
      // Limit vector size to 16 bytes on AMD cpus < 17h.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2

    // Some defaults for AMD family 17h || Hygon family 18h
    if (cpu_family() == 0x17 || cpu_family() == 0x18) {
      // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
      }
#ifdef COMPILER2
      if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
#endif
    }
  }

  if (is_intel()) { // Intel cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if (cpu_family() == 6 || cpu_family() == 15) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if (is_atom_family() || is_knights_family()) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (FLAG_IS_DEFAULT(UseIncDec)) {
        FLAG_SET_DEFAULT(UseIncDec, false);
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // To enable VBMI2 we require that the processor supports it and also that
  // EVEX encoding is supported and enabled.
  if (supports_vbmi2() && UseAVX > 2) {
    if (FLAG_IS_DEFAULT(UseVBMI2)) {
      UseVBMI2 = true;
    }
  } else if (UseVBMI2) {
    warning("VBMI2 instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseVBMI2, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

  // Use XMM/YMM MOVDQU instruction for Object Initialization
  if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
    if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
      UseXMMForObjInit = true;
    }
  } else if (UseXMMForObjInit) {
    warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
    FLAG_SET_DEFAULT(UseXMMForObjInit, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
    if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
    } else if (!supports_sse() && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
      (cache_line_size > AllocatePrefetchStepSize)) {
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
  }

  if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
    assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
    if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
      warning("AllocatePrefetchDistance is set to 0 which disables prefetching. Ignoring AllocatePrefetchStyle flag.");
    }
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
  }

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
        supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
    }
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings

  // Prefetch interval for gc copy/scan == 9 dcache lines.  Derived from
  // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
  // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
  // 256 bytes (4 dcache lines) was the nearest runner-up to 576.

  // gc copy/scan is disabled if prefetchw isn't supported, because
  // Prefetch::write emits an inlined prefetchw on Linux.
  // Do not use the 3dnow prefetchw instruction.  It isn't supported on em64t.
  // The used prefetcht0 instruction works for both amd64 and em64t.
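  // (9 dcache lines * 64 bytes per line gives the 576-byte default set below.)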

  if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchFieldsAhead)) {
    FLAG_SET_DEFAULT(PrefetchFieldsAhead, 1);
  }
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    LogStream ls(Log(os, cpu)::info());
    outputStream* log = &ls;
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print("  UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print("  UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print("  MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

void VM_Version::print_platform_virtualization_info(outputStream* st) {
  VirtualizationType vrt = VM_Version::get_detected_virtualization();
  if (vrt == XenHVM) {
    st->print_cr("Xen hardware-assisted virtualization detected");
  } else if (vrt == KVM) {
    st->print_cr("KVM virtualization detected");
  } else if (vrt == VMWare) {
    st->print_cr("VMWare virtualization detected");
    VirtualizationSupport::print_virtualization_info(st);
  } else if (vrt == HyperV) {
    st->print_cr("HyperV virtualization detected");
  }
}

void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
  // TODO support 32 bit
#if defined(_LP64)
#if defined(_MSC_VER)
  // Allocate space for the code
  const int code_size = 100;
  ResourceMark rm;
  CodeBuffer cb("detect_virt", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);
  address code = a->pc();
  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;

  a->movq(r9, rbx); // save nonvolatile register

  // next line would not work on 32-bit
  a->movq(rax, c_rarg0 /* rcx */);
  a->movq(r8, c_rarg1 /* rdx */);
  a->cpuid();
  a->movl(Address(r8,  0), rax);
  a->movl(Address(r8,  4), rbx);
  a->movl(Address(r8,  8), rcx);
  a->movl(Address(r8, 12), rdx);

  a->movq(rbx, r9); // restore nonvolatile register
  a->ret(0);

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // execute code
  (*test)(idx, regs);
#elif defined(__GNUC__)
  __asm__ volatile (
     "  cpuid;"
     "  mov %%eax,(%1);"
     "  mov %%ebx,4(%1);"
     "  mov %%ecx,8(%1);"
     "  mov %%edx,12(%1);"
   : "+a" (idx)
   : "S" (regs)
   : "ebx", "ecx", "edx", "memory" );
#endif
#endif
}


bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

// On Xen, the cpuid instruction returns
//  eax / registers[0]: Version of Xen
//  ebx / registers[1]: chars 'XenV'
//  ecx / registers[2]: chars 'MMXe'
//  edx / registers[3]: chars 'nVMM'
//
// On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
//  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
//  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
//  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
//
// more information :
//  https://kb.vmware.com/s/article/1009458
//
void VM_Version::check_virtualizations() {
#if defined(_LP64)
  uint32_t registers[4];
  char signature[13];
  uint32_t base;
  signature[12] = '\0';
  memset((void*)registers, 0, 4*sizeof(uint32_t));

  // Scan the hypervisor CPUID leaf range for a known vendor signature.
  for (base = 0x40000000; base < 0x40010000; base += 0x100) {
    check_virt_cpuid(base, registers);

    *(uint32_t *)(signature + 0) = registers[1];
    *(uint32_t *)(signature + 4) = registers[2];
    *(uint32_t *)(signature + 8) = registers[3];

    if (strncmp("VMwareVMware", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = VMWare;
      // check for extended metrics from guestlib
      VirtualizationSupport::initialize();
    }

    if (strncmp("Microsoft Hv", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = HyperV;
    }

    if (strncmp("KVMKVMKVM", signature, 9) == 0) {
      Abstract_VM_Version::_detected_virtualization = KVM;
    }

    if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = XenHVM;
    }
  }
#endif
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
  if (cpu_family() > 4) { // it supports CPUID
    check_virtualizations();
  }
}