/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
int VM_Version::_cpuFeatures;
const char*           VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info   = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 600;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
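// get_cpu_info_stub is generated once at startup (see VM_Version::initialize()
// at the bottom of this file) and is called as get_cpu_info_stub(&_cpuid_info).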


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC           = 0x40000;
    const uint32_t HS_EFL_ID           = 0x200000;
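    // HS_EFL_AC is EFLAGS bit 18 (Alignment Check) and HS_EFL_ID is EFLAGS
    // bit 21 (ID): a 386 cannot toggle AC, a 486 cannot toggle ID, and a CPU
    // that can toggle ID supports the cpuid instruction.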
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are the first and second argument registers on Windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // If we are unable to change the AC flag, we have a 386.
    //
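    // Roughly equivalent C sketch of the EFLAGS round-trip below
    // (illustration only; read_eflags()/write_eflags() are hypothetical):
    //   uint32_t old_flags = read_eflags();
    //   write_eflags(old_flags ^ HS_EFL_AC);
    //   if (read_eflags() == old_flags) family = 386;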
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);
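    // Leaf 0 returns the highest supported standard leaf in eax and the
    // 12-byte vendor string in ebx:edx:ecx (e.g. "GenuineIntel").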

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
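    // CPUID.1:ECX bit 27 is OSXSAVE and bit 28 is AVX, so masking with
    // 0x18000000 tests both bits at once.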
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
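    // XCR0 bit 1 covers SSE (XMM) state and bit 2 covers AVX (YMM) state;
    // the OS must have set both before YMM registers can be used safely.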

    __ andl(rax, 0x6); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // Some OSes have a bug where the upper 128 bits of the YMM registers
    // are not restored after signal processing.
    // Generate a SEGV here (by reading through NULL)
    // and check the upper YMM bits after it.
    //
    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;
    UseAVX = 1;
    UseSSE = 2;

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128h(xmm0, xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8,  xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
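    // At this point ymm0 and ymm7 (plus ymm8 and ymm15 on 64-bit) hold the
    // 32-bit test value replicated across all eight lanes.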

    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr( __ pc() );
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr( __ pc() );
    // Returns here after the signal. Save the YMM registers to check them later.
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi,  0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif
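    // The saved values are compared against ymm_test_value() later (see
    // os_supports_avx_vectors()); a mismatch means the OS failed to restore
    // the upper YMM halves across the signal.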

    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
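    // The checks above fall through only when the highest extended leaf is
    // at least 0x80000008; lower maximums jump to one of the labels below,
    // which then fall through to each other in descending leaf order.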
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // return
    //
    __ bind(done);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _cpuFeatures = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  if (!Use486InstrsOnly) {
    // Get raw processor info

    get_cpu_info_stub(&_cpuid_info);

    assert_is_initialized();
    _cpu = extended_cpu_family();
    _model = extended_cpu_model();
    _stepping = cpu_stepping();

    if (cpu_family() > 4) { // it supports CPUID
      _cpuFeatures = feature_flags();
      // Logical processors are only available on P4s and above,
      // and only if hyperthreading is available.
      _logical_processors_per_package = logical_processor_count();
      _L1_data_cache_line_size = L1_line_size();
    }
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // the clflush instruction in the 32-bit VM, since it could be running
  // on a CPU which does not support it.
  //
  // The only thing we can do is to verify that the flushed
  // ICache::line_size has the correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is reported in 8-byte (quadword) units, so 8 means a 64-byte cache line.
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _cpuFeatures &= ~CPU_SSE4_1;
    _cpuFeatures &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _cpuFeatures &= ~CPU_SSE3;
    _cpuFeatures &= ~CPU_SSSE3;
    _cpuFeatures &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _cpuFeatures &= ~CPU_SSE2;

  if (UseSSE < 1)
    _cpuFeatures &= ~CPU_SSE;

  if (UseAVX < 2)
    _cpuFeatures &= ~CPU_AVX2;

  if (UseAVX < 1)
    _cpuFeatures &= ~CPU_AVX;

  if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
    _cpuFeatures &= ~CPU_AES;

  if (logical_processors_per_package() == 1) {
    // An HT-capable processor may be installed in a system which doesn't support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx()  ? ", mmx"  : ""),
               (supports_sse()  ? ", sse"  : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
               (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
               (supports_sse4a()   ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""),
               (supports_adx() ? ", adx" : ""));
  _features_str = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if (UseSSE > 4) UseSSE=4;
  if (UseSSE < 0) UseSSE=0;
  if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if (!supports_sse3()) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if (!supports_sse2()) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if (!supports_sse ()) // Drop to 0 if no SSE  support
    UseSSE = 0;
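  // Example: -XX:UseSSE=4 on a CPU with SSE3 but no SSE4.1 ends up with
  // UseSSE == 3 after the clamping above.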

  if (UseAVX > 2) UseAVX=2;
  if (UseAVX < 0) UseAVX=0;
  if (!supports_avx2()) // Drop to 1 if no AVX2 support
    UseAVX = MIN2((intx)1,UseAVX);
  if (!supports_avx ()) // Drop to 0 if no AVX  support
    UseAVX = 0;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics require CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support (of course)
  // but also require SSE3 mode for the instructions they use.
  if (UseAES && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }
  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
    warning("SHA intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  // Adjust RTM (Restricted Transactional Memory) flags
  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        if (!UnlockExperimentalVMOptions) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
  if (MaxVectorSize > 0) {
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2");
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
    if (MaxVectorSize > 32) {
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
      // 32-byte vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
      // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
#ifdef ASSERT
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >=0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
#endif
  }

#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif
#endif // COMPILER2

  // On new CPUs, instructions which update the whole XMM register should be used
  // to prevent partial register stalls due to dependencies on the high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if( supports_sse4a() ) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) {
      if( supports_sse4_2() && UseSSE >= 4 ) {
        UseSSE42Intrinsics = true;
      }
    }

    // some defaults for AMD family 15h
    if ( cpu_family() == 0x15 ) {
      // On family 15h processors default is no sw prefetch
      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
        AllocatePrefetchStyle = 0;
      }
      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
        AllocatePrefetchInstr = 3;
      }
      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
      if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true;
      }
      if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true;
      }
    }

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Limit vectors size to 16 bytes on current AMD cpus.
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
#endif // COMPILER2
  }

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if (supports_sse4_2() && supports_ht()) { // Newest Intel cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if (supports_sse4_2() && UseSSE >= 4) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          UseSSE42Intrinsics = true;
        }
      }
    }
    if ((cpu_family() == 0x06) &&
        ((extended_cpu_model() == 0x36) || // Centerton
         (extended_cpu_model() == 0x37) || // Silvermont
         (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(OptoScheduling)) {
        OptoScheduling = true;
      }
#endif
      if (supports_sse4_2()) { // Silvermont
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
    }
    if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
      AllocatePrefetchInstr = 3;
    }
  }

  // Use the count-leading-zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use the count-trailing-zeros instruction if available.
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");

  // set valid Prefetch instruction
  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
  if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
  if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0;
  if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;
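  // AllocatePrefetchInstr selects 0 = PREFETCHNTA, 1 = PREFETCHT0,
  // 2 = PREFETCHT2, 3 = PREFETCHW (see the PrintMiscellaneous output below).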

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;

  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 )     // set valid value in product VM
    AllocatePrefetchLines = 3;
  assert(AllocateInstancePrefetchLines > 0, "invalid value");
  if( AllocateInstancePrefetchLines < 1 ) // set valid value in product VM
    AllocateInstancePrefetchLines = 1;

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle    = allocate_prefetch_style();

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      AllocatePrefetchLines = 4;
    }
#ifdef COMPILER2
    if (supports_sse4_2()) {
      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
    }
#endif
  }
  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead         = prefetch_fields_ahead();
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
     ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    tty->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      tty->print("  UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      tty->print("  UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      tty->print("  MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    tty->cr();
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      tty->print_cr(": no prefetching");
    } else {
      tty->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          tty->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          tty->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          tty->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention.  With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();
}