/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1000;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
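// The generated stub is invoked exactly once, from VM_Version::initialize(),
// as get_cpu_info_stub(&_cpuid_info); it runs the cpuid leaves below and
// records the raw register values in _cpuid_info for later decoding.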


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
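    // The AC (alignment check) flag, EFLAGS bit 18, exists only on 486 and
    // later processors; on a 386 the xor below has no lasting effect, so the
    // value read back still equals the original flags saved in rcx.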
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
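    // (The ID flag, EFLAGS bit 21, can be toggled if and only if the
    // processor implements the cpuid instruction.)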
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
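    // Leaf 0xB is enumerated by sub-leaf: rcx selects the topology level
    // (0 = threads, 1 = cores, 2 = packages below). A returned level is
    // valid only when eax[4:0] (the shift count) or ebx[15:0] (the logical
    // processor count) is non-zero, which is what the checks below test.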
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
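    // Leaf 0x4 is likewise sub-leaf based: rcx selects the cache, and
    // eax[4:0] encodes the cache type, with 0 meaning "no more caches".
    // Only the first sub-leaf (rcx = 0) is recorded here.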
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
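    // 0x18000000 == CPUID.1:ECX.OSXSAVE[bit 27] | CPUID.1:ECX.AVX[bit 28];
    // both bits must be set before xgetbv can be used to query XCR0.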
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
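    // xgetbv with rcx == 0 returns XCR0 in edx:eax; bits 1 (SSE) and 2 (YMM)
    // report which register state the OS saves and restores on context switch.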
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
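    // Leaf 0x7, sub-leaf 0 (rcx == 0), reports the structured extended
    // feature flags; ebx bit 16 (avx512f) is tested later to pick the EVEX path.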
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // jump if the OS supports AVX state (xcr0 sse | ymm bits set)

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSes have a bug: the upper 128/256 bits of the YMM/ZMM
    // registers are not restored after signal processing.
    // Generate a SEGV here (reference through NULL)
    // and check the upper YMM/ZMM bits after it.
    //
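    // The platform signal handler recognizes _cpuinfo_segv_addr and resumes
    // execution at _cpuinfo_cont_addr, so the faulting load below is a
    // controlled round trip through the OS signal machinery.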
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4)); // cpuid7 ebx bit 16: avx512f
    __ cmpl(rax, 0x10000);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm6-xmm15 must be preserved by the callee on Windows, so save the ones we clobber
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ movl(rcx, 0xffff);
      __ kmovwl(k1, rcx);
      __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.

    // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(rax, 0x10000);
    __ andl(rax, Address(rsi, 4));
    __ cmpl(rax, 0x10000);
    __ jcc(Assembler::notEqual, legacy_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.opmask
    // check _cpuid_info.xem_xcr0_eax.bits.zmm512
    // check _cpuid_info.xem_xcr0_eax.bits.zmm32
    __ movl(rax, 0xE0);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
    __ cmpl(rax, 0xE0);
    __ jcc(Assembler::notEqual, legacy_save_restore);

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  }
  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
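    // vzeroupper avoids AVX-SSE transition penalties, but it is known to be
    // slow on Knights Landing/Knights Mill (Xeon Phi), so it is skipped there.
    // The first compare below checks the vendor string word 'uneG' ("Genu"
    // of "GenuineIntel"); non-Intel CPUs take the early exit.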
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547);  // 'uneG' -- "Genu" in little-endian
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670);              // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650);              // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    __ vzeroupper();
#   undef __
  }
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // The flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // clflush instruction in 32-bit VM since it could be running
  // on CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
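  // A clflush_size of 8 quadwords corresponds to the 64-byte line size
  // hard-coded in ICache::line_size (see icache_x86.hpp).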
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // first try initial setting and detect what we can support
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      UseAVX = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      UseAVX = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      UseAVX = 1;
    } else {
      UseAVX = 0;
    }
  } else if (UseAVX < 0) {
    UseAVX = 0;
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1) {
    _features &= ~CPU_AVX;
    _features &= ~CPU_VZEROUPPER;
  }

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _features &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // An HT-capable processor may be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  if( is_intel() ) { // Intel cpus specific settings
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x57) ||   // Xeon Phi 3200/5200/7200
         (extended_cpu_model() == 0x85))) {  // Future Xeon Phi
      _features &= ~CPU_VZEROUPPER;
    }
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
               (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""),
               (supports_evex() ? ", evex" : ""),
               (supports_sha() ? ", sha" : ""),
               (supports_fma() ? ", fma" : ""));
  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE  support
    UseSSE = 0;
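  // For example, -XX:UseSSE=4 on a CPU with SSE3 but without SSE4.1 ends
  // up as UseSSE=3 after the clamping above.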

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require SSE3 or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require SSE4.1 or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsics require CLMUL and SSE2 instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) {
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_client_compilation_mode_vm()) {
      // Only C2 does RTM locking optimization.
      // Can't continue because UseRTMLocking affects UseBiasedLocking flag
      // setting during arguments processing. See use_biased_locking().
      vm_exit_during_initialization("RTM locking optimization is not supported in emulated client VM");
    }
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 64);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      if (MaxVectorSize > 0) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be 0");
        FLAG_SET_DEFAULT(MaxVectorSize, 0);
      }
    }
    else if (UseAVX == 0 || !os_supports_avx_vectors()) {
      // 32 bytes vectors (in YMM) are only supported with AVX+
      if (MaxVectorSize > 16) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 16");
        FLAG_SET_DEFAULT(MaxVectorSize, 16);
      }
    }
    else if (UseAVX == 1 || UseAVX == 2) {
      // 64 bytes vectors (in ZMM) are only supported with AVX 3
      if (MaxVectorSize > 32) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 32");
        FLAG_SET_DEFAULT(MaxVectorSize, 32);
      }
    }
    else if (UseAVX > 2) {
      if (MaxVectorSize > 64) {
        if (!FLAG_IS_DEFAULT(MaxVectorSize))
          warning("MaxVectorSize must be <= 64");
        FLAG_SET_DEFAULT(MaxVectorSize, 64);
      }
    }
#if defined(COMPILER2) && defined(ASSERT)
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handle:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif // COMPILER2 && ASSERT
  }
#endif // COMPILER2 || INCLUDE_JVMCI

#ifdef COMPILER2
#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On newer cpus, instructions which update the whole XMM register
  // should be used to prevent partial register stalls due to
  // dependencies on the high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if (supports_sse4a()) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
      }
    } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
        warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
    }

    // some defaults for AMD family 15h
    if ( cpu_family() == 0x15 ) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

#ifdef _LP64
  if (UseSSE42Intrinsics) {
    if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      UseVectorizedMismatchIntrinsic = true;
    }
  } else if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
      warning("vectorizedMismatch intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#else
  if (UseVectorizedMismatchIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
      warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
#endif // _LP64

  // Use the count-leading-zeros instruction (lzcnt) if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0;
  if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
        FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
      }
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
     ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    outputStream* log = Log(os, cpu)::info_stream();
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print("  UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print("  UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print("  MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention.  With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Creating this stub must be the FIRST use of the assembler.

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
}