/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/virtualizationSupport.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1100;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
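    // (Without CPUID, the family number is synthesized directly into the
    // std_cpuid1 slot below, pre-shifted into the eax family field.)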
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve the flags (rbx and rsi are saved above)
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
    __ jcc(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    __ cmpl(rax, 0x80000008); // Is cpuid(0x80000009 and above) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid8);
    __ cmpl(rax, 0x8000001E); // Is cpuid(0x8000001E) supported?
    __ jccb(Assembler::below, ext_cpuid8);
    //
    // Extended cpuid(0x8000001E)
    //
    __ movl(rax, 0x8000001E);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000008)
    //
    __ bind(ext_cpuid8);
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // jump if the OS has enabled SSE and YMM state

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

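    // The SIMD check below proceeds in three phases: load a test pattern
    // into the upper halves of the YMM/ZMM registers (EVEX or legacy-AVX
    // setup), take a deliberate SEGV, then save the registers afterwards
    // for later comparison. UseAVX/UseSSE are temporarily overridden while
    // generating it so the assembler's asserts accept the EVEX/VEX
    // encodings; they are restored once the stub is emitted.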
    __ bind(start_simd_check);
    //
    // Some OSs have a bug: the upper 128/256 bits of the YMM/ZMM
    // registers are not restored after signal processing.
    // Generate a SEGV here (reference through NULL)
    // and check the upper YMM/ZMM bits after it.
    //
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx: avx512f bit
      __ cmpl(rax, 0x10000);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654); // If it is Skylake
        __ jcc(Assembler::equal, legacy_setup);
      }
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm5-xmm15 are not preserved by caller on windows
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.
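    // (The register contents saved below are later compared against
    // ymm_test_value() by os_supports_avx_vectors() when MaxVectorSize
    // is validated in get_processor_features().)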
    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4));
      __ cmpl(rax, 0x10000);
      __ jcc(Assembler::notEqual, legacy_save_restore);
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jcc(Assembler::notEqual, legacy_save_restore);

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654); // If it is Skylake
        __ jcc(Assembler::equal, legacy_save_restore);
      }
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };

  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    __ vzeroupper();
#   undef __
  }
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why the ICache line size is hard coded in the ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use the
  // clflush instruction in the 32-bit VM, since it could be running
  // on a CPU which does not support it.
  //
  // The only thing we can do is to verify that the flushed
  // ICache::line_size has the correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // Since AVX instructions are slower than SSE on some ZX CPUs, force UseAVX=0.
  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
    UseAVX = 0;
  }

  // first try initial setting and detect what we can support
  int use_avx_limit = 0;
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      use_avx_limit = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      use_avx_limit = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      use_avx_limit = 1;
    } else {
      use_avx_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseAVX)) {
    // Don't use AVX-512 on older Skylakes unless explicitly requested.
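    // (Early Skylake server steppings are assumed to pay enough of a
    // frequency penalty for 512-bit instructions that AVX2 is the
    // safer default on them.)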
    if (use_avx_limit > 2 && is_intel_skylake() && _stepping < 5) {
      FLAG_SET_DEFAULT(UseAVX, 2);
    } else {
      FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
    }
  }
  if (UseAVX > use_avx_limit) {
    warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
  } else if (UseAVX < 0) {
    warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
    FLAG_SET_DEFAULT(UseAVX, 0);
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
    _features &= ~CPU_AVX512_VPOPCNTDQ;
    _features &= ~CPU_AVX512_VPCLMULQDQ;
    _features &= ~CPU_VAES;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1) {
    _features &= ~CPU_AVX;
    _features &= ~CPU_VZEROUPPER;
  }

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  if( is_intel() ) { // Intel cpus specific settings
    if (is_knights_family()) {
      _features &= ~CPU_VZEROUPPER;
    }
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
               (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""),
               (supports_fma() ? ", fma" : ""));
  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
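  // (For example, -XX:UseSSE=4 on a CPU with only SSE3 support is capped
  // to UseSSE=3 with a warning by the limit logic below.)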
  int use_sse_limit = 0;
  if (UseSSE > 0) {
    if (UseSSE > 3 && supports_sse4_1()) {
      use_sse_limit = 4;
    } else if (UseSSE > 2 && supports_sse3()) {
      use_sse_limit = 3;
    } else if (UseSSE > 1 && supports_sse2()) {
      use_sse_limit = 2;
    } else if (UseSSE > 0 && supports_sse()) {
      use_sse_limit = 1;
    } else {
      use_sse_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseSSE)) {
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE > use_sse_limit) {
    warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE < 0) {
    warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
    FLAG_SET_DEFAULT(UseSSE, 0);
  }

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require SSE3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require SSE4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }

  // Use CLMUL instructions if available.
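  // (CLMUL is carry-less multiplication, PCLMULQDQ; the CRC32, CRC32C and
  // GHASH intrinsics configured below all depend on it.)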
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics require CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsics require CLMUL and SSE2 instructions, not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  // Base64 Intrinsics (Check the condition for which the intrinsic will be active)
  if ((UseAVX > 2) && supports_avx512vl() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
      UseBASE64Intrinsics = true;
    }
  } else if (UseBASE64Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
      warning("Base64 intrinsic requires EVEX instructions on this CPU");
    FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) { // Check UseSSE since FMA code uses SSE instructions
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
    if (!is_power_of_2(AVX3Threshold)) {
      warning("AVX3Threshold must be a power of 2");
      FLAG_SET_DEFAULT(AVX3Threshold, 4096);
    }
  }

#ifdef _LP64
  // These are only supported on 64-bit
  if (UseSHA && supports_avx2() && supports_bmi2()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else
#endif
  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_client_compilation_mode_vm()) {
      // Only C2 does RTM locking optimization.
      // Can't continue because UseRTMLocking affects UseBiasedLocking flag
      // setting during arguments processing. See use_biased_locking().
      vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
    }
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this "
                                        "platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

#if COMPILER2_OR_JVMCI
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      if (MaxVectorSize > 0) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be 0");
        FLAG_SET_DEFAULT(MaxVectorSize, 0);
      }
    }
    else if (UseAVX == 0 || !os_supports_avx_vectors()) {
      // 32-byte vectors (in YMM) are only supported with AVX+
      if (MaxVectorSize > 16) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 16");
        FLAG_SET_DEFAULT(MaxVectorSize, 16);
      }
    }
    else if (UseAVX == 1 || UseAVX == 2) {
      // 64-byte vectors (in ZMM) are only supported with AVX3
      if (MaxVectorSize > 32) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 32");
        FLAG_SET_DEFAULT(MaxVectorSize, 32);
      }
    }
    else if (UseAVX > 2) {
      if (MaxVectorSize > 64) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 64");
        FLAG_SET_DEFAULT(MaxVectorSize, 64);
      }
    }
#if defined(COMPILER2) && defined(ASSERT)
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif // COMPILER2 && ASSERT
  }
#endif // COMPILER2_OR_JVMCI

#ifdef COMPILER2
#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On new CPUs, instructions which update the whole XMM register should be
  // used to prevent partial register stalls due to dependencies on the high
  // half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).


  if (is_zx()) { // ZX cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on ZX cpus
    }
    if ((cpu_family() == 6) || (cpu_family() == 7)) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all ZX cpus
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new ZX cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new ZX cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
      }
      if (supports_sse4_2()) { // new ZX cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }

    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if ( cpu_family() == 0x15 ) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
      }
    }

#ifdef COMPILER2
    if (cpu_family() < 0x17 && MaxVectorSize > 16) {
      // Limit vector size to 16 bytes on AMD cpus < 17h.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2

    // Some defaults for AMD family 17h
    if ( cpu_family() == 0x17 ) {
      // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
      }
#ifdef COMPILER2
      if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
#endif
    }
  }

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if (is_atom_family() || is_knights_family()) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (FLAG_IS_DEFAULT(UseIncDec)) {
        FLAG_SET_DEFAULT(UseIncDec, false);
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
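  // (POPCNT counts the set bits in a word; it is reported by CPUID leaf 1,
  // ECX bit 23.)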
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

  // Use XMM/YMM MOVDQU instruction for Object Initialization
  if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
    if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
      UseXMMForObjInit = true;
    }
  } else if (UseXMMForObjInit) {
    warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
    FLAG_SET_DEFAULT(UseXMMForObjInit, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
    if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
    } else if (!supports_sse() && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
      (cache_line_size > AllocatePrefetchStepSize)) {
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
  }

  if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
    assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
    if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
      warning("AllocatePrefetchDistance is set to 0 which disables prefetching. Ignoring AllocatePrefetchStyle flag.");
    }
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
  }

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
        supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
    }
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings

  // Prefetch interval for gc copy/scan == 9 dcache lines.  Derived from
  // 50-warehouse SPECjbb runs on a 2-way 1.8 GHz Opteron using a 4 GB heap.
  // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
  // 256 bytes (4 dcache lines) was the nearest runner-up to 576.
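  // (576 bytes at the 64-byte dcache line size is the 9-line interval
  // quoted above.)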
  // gc copy/scan is disabled if prefetchw isn't supported, because
  // Prefetch::write emits an inlined prefetchw on Linux.
  // Do not use the 3dnow prefetchw instruction.  It isn't supported on em64t.
  // The used prefetcht0 instruction works for both amd64 and em64t.

  if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchFieldsAhead)) {
    FLAG_SET_DEFAULT(PrefetchFieldsAhead, 1);
  }
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
      (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    LogStream ls(Log(os, cpu)::info());
    outputStream* log = &ls;
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print("  UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print("  UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print("  MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

void VM_Version::print_platform_virtualization_info(outputStream* st) {
  VirtualizationType vrt = VM_Version::get_detected_virtualization();
  if (vrt == XenHVM) {
    st->print_cr("Xen hardware-assisted virtualization detected");
  } else if (vrt == KVM) {
    st->print_cr("KVM virtualization detected");
  } else if (vrt == VMWare) {
    st->print_cr("VMWare virtualization detected");
    VirtualizationSupport::print_virtualization_info(st);
  } else if (vrt == HyperV) {
    st->print_cr("HyperV virtualization detected");
  }
}

void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
// TODO support 32 bit
#if defined(_LP64)
#if defined(_MSC_VER)
  // Allocate space for the code
  const int code_size = 100;
  ResourceMark rm;
  CodeBuffer cb("detect_virt", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);
  address code = a->pc();
  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;

  a->movq(r9, rbx); // save nonvolatile register

  // next line would not work on 32-bit
  a->movq(rax, c_rarg0 /* rcx */);
  a->movq(r8, c_rarg1 /* rdx */);
  a->cpuid();
  a->movl(Address(r8,  0), rax);
  a->movl(Address(r8,  4), rbx);
  a->movl(Address(r8,  8), rcx);
  a->movl(Address(r8, 12), rdx);

  a->movq(rbx, r9); // restore nonvolatile register
  a->ret(0);

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // execute code
  (*test)(idx, regs);
#elif defined(__GNUC__)
  __asm__ volatile (
     "  cpuid;"
     "  mov %%eax,(%1);"
     "  mov %%ebx,4(%1);"
     "  mov %%ecx,8(%1);"
     "  mov %%edx,12(%1);"
     : "+a" (idx)
     : "S" (regs)
     : "ebx", "ecx", "edx", "memory" );
#endif
#endif
}


bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

// On Xen, the cpuid instruction returns
//  eax / registers[0]: Version of Xen
//  ebx / registers[1]: chars 'XenV'
//  ecx / registers[2]: chars 'MMXe'
//  edx / registers[3]: chars 'nVMM'
//
// On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
//  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
//  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
//  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
//
// more information:
// https://kb.vmware.com/s/article/1009458
//
void VM_Version::check_virtualizations() {
#if defined(_LP64)
  uint32_t registers[4];
  char signature[13];
  uint32_t base;
  signature[12] = '\0';
  memset((void*)registers, 0, 4*sizeof(uint32_t));

  for (base = 0x40000000; base < 0x40010000; base += 0x100) {
    check_virt_cpuid(base, registers);

    *(uint32_t *)(signature + 0) = registers[1];
    *(uint32_t *)(signature + 4) = registers[2];
    *(uint32_t *)(signature + 8) = registers[3];

    if (strncmp("VMwareVMware", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = VMWare;
      // check for extended metrics from guestlib
      VirtualizationSupport::initialize();
    }

    if (strncmp("Microsoft Hv", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = HyperV;
    }

    if (strncmp("KVMKVMKVM", signature, 9) == 0) {
      Abstract_VM_Version::_detected_virtualization = KVM;
    }

    if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = XenHVM;
    }
  }
#endif
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
  if (cpu_family() > 4) { // it supports CPUID
    check_virtualizations();
  }
}