33 #include "runtime/os.hpp"
34 #include "runtime/stubCodeGenerator.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/powerOfTwo.hpp"
37 #include "utilities/virtualizationSupport.hpp"
38
39 #include OS_HEADER_INLINE(os)
40
// Static state of VM_Version; populated by get_processor_features() from the
// raw CPUID data gathered by the generated stub below.
int VM_Version::_cpu;       // extended CPU family (set from extended_cpu_family())
int VM_Version::_model;     // extended CPU model (set from extended_cpu_model())
int VM_Version::_stepping;  // CPU stepping (set from cpu_stepping())
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

// Blob holding the generated get_cpu_info stub, and its capacity in bytes.
static BufferBlob* stub_blob;
static const int stub_size = 1100;

extern "C" {
  // Generated stub: fills the CpuidInfo struct passed in with raw CPUID data.
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
59
60
61 class VM_Version_StubGenerator: public StubCodeGenerator {
62 public:
63
64 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
65
66 address generate_get_cpu_info() {
67 // Flags to test CPU type.
68 const uint32_t HS_EFL_AC = 0x40000;
69 const uint32_t HS_EFL_ID = 0x200000;
70 // Values for when we don't have a CPUID instruction.
71 const int CPU_FAMILY_SHIFT = 8;
72 const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
73 const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
74 bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
75
76 Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
77 Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
78 Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
551 return start;
552 };
  // Emit a vzeroupper into the stub unless it must be skipped on this CPU:
  // control falls through to vzeroupper_uncached() only for GenuineIntel
  // parts whose masked family/model is not one of the Xeon Phi ids checked
  // below (presumably because vzeroupper is undesirable there -- TODO
  // confirm); everything else jumps to L_wrapup with nothing emitted.
  void generate_vzeroupper(Label& L_wrapup) {
#define __ _masm->
    // std_cpuid0 word at offset 4 == 'uneG' identifies a GenuineIntel CPU.
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    // Mask the first dword of the saved std_cpuid1 block (presumably eax:
    // family/model/stepping -- TODO confirm) down to the id bits compared below.
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    // vzeroupper() will use a pre-computed instruction sequence that we
    // can't compute until after we've determined CPU capabilities. Use
    // uncached variant here directly to be able to bootstrap correctly
    __ vzeroupper_uncached();
#undef __
  }
571 };
572
573 void VM_Version::get_processor_features() {
574
575 _cpu = 4; // 486 by default
576 _model = 0;
577 _stepping = 0;
578 _features = 0;
579 _logical_processors_per_package = 1;
580 // i486 internal cache is both I&D and has a 16-byte line size
581 _L1_data_cache_line_size = 16;
582
583 // Get raw processor info
584
585 get_cpu_info_stub(&_cpuid_info);
586
587 assert_is_initialized();
588 _cpu = extended_cpu_family();
589 _model = extended_cpu_model();
590 _stepping = cpu_stepping();
1654 if (PrefetchFieldsAhead > 0) {
1655 log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
1656 }
1657 if (ContendedPaddingWidth > 0) {
1658 log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
1659 }
1660 }
1661 #endif // !PRODUCT
1662 }
1663
1664 void VM_Version::print_platform_virtualization_info(outputStream* st) {
1665 VirtualizationType vrt = VM_Version::get_detected_virtualization();
1666 if (vrt == XenHVM) {
1667 st->print_cr("Xen hardware-assisted virtualization detected");
1668 } else if (vrt == KVM) {
1669 st->print_cr("KVM virtualization detected");
1670 } else if (vrt == VMWare) {
1671 st->print_cr("VMWare virtualization detected");
1672 VirtualizationSupport::print_virtualization_info(st);
1673 } else if (vrt == HyperV) {
1674 st->print_cr("HyperV virtualization detected");
1675 }
1676 }
1677
// Execute CPUID with eax = idx and store the resulting eax/ebx/ecx/edx into
// regs[0..3]. Only implemented for 64-bit builds; on 32-bit this is a no-op
// and regs is left untouched.
void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
// TODO support 32 bit
#if defined(_LP64)
#if defined(_MSC_VER)
  // 64-bit MSVC provides no inline assembly, so assemble a tiny CPUID
  // routine at runtime and invoke it through a function pointer.
  // Allocate space for the code
  const int code_size = 100;
  ResourceMark rm;
  CodeBuffer cb("detect_virt", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);
  address code = a->pc();
  // NOTE(review): the memory backing this CodeBuffer is resource-area
  // allocated; confirm it is executable on systems enforcing DEP/NX.
  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;

  a->movq(r9, rbx); // save nonvolatile register

  // next line would not work on 32-bit
  a->movq(rax, c_rarg0 /* rcx */); // CPUID leaf argument into rax
  a->movq(r8, c_rarg1 /* rdx */);  // output array pointer into r8
  a->cpuid();
  a->movl(Address(r8, 0), rax);    // regs[0] = eax
  a->movl(Address(r8, 4), rbx);    // regs[1] = ebx
  a->movl(Address(r8, 8), rcx);    // regs[2] = ecx
  a->movl(Address(r8, 12), rdx);   // regs[3] = edx

  a->movq(rbx, r9); // restore nonvolatile register
  a->ret(0);

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // execute code
  (*test)(idx, regs);
#elif defined(__GNUC__)
  // Inline asm: "+a"(idx) places idx in eax (and receives CPUID's output
  // eax); "S"(regs) passes the output array pointer; the mov's store the
  // remaining results through that pointer, hence the "memory" clobber.
  __asm__ volatile (
    " cpuid;"
    " mov %%eax,(%1);"
    " mov %%ebx,4(%1);"
    " mov %%ecx,8(%1);"
    " mov %%edx,12(%1);"
    : "+a" (idx)
    : "S" (regs)
    : "ebx", "ecx", "edx", "memory" );
#endif
#endif
}
1722
1723
1724 bool VM_Version::use_biased_locking() {
1725 #if INCLUDE_RTM_OPT
1726 // RTM locking is most useful when there is high lock contention and
1727 // low data contention. With high lock contention the lock is usually
1728 // inflated and biased locking is not suitable for that case.
1729 // RTM locking code requires that biased locking is off.
1730 // Note: we can't switch off UseBiasedLocking in get_processor_features()
1731 // because it is used by Thread::allocate() which is called before
1732 // VM_Version::initialize().
1733 if (UseRTMLocking && UseBiasedLocking) {
1734 if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
1735 FLAG_SET_DEFAULT(UseBiasedLocking, false);
1736 } else {
1737 warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
1738 UseBiasedLocking = false;
1739 }
1740 }
1741 #endif
1742 return UseBiasedLocking;
1743 }
1804 // If we are running on another intel machine not recognized in the table, we are okay.
1805 return false;
1806 }
1807 }
1808
1809 // On Xen, the cpuid instruction returns
1810 // eax / registers[0]: Version of Xen
1811 // ebx / registers[1]: chars 'XenV'
1812 // ecx / registers[2]: chars 'MMXe'
1813 // edx / registers[3]: chars 'nVMM'
1814 //
1815 // On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
1816 // ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
1817 // ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
1818 // edx / registers[3]: chars 'M' / 'ware' / 't Hv'
1819 //
// more information:
1821 // https://kb.vmware.com/s/article/1009458
1822 //
1823 void VM_Version::check_virtualizations() {
1824 #if defined(_LP64)
1825 uint32_t registers[4];
1826 char signature[13];
1827 uint32_t base;
1828 signature[12] = '\0';
1829 memset((void*)registers, 0, 4*sizeof(uint32_t));
1830
1831 for (base = 0x40000000; base < 0x40010000; base += 0x100) {
1832 check_virt_cpuid(base, registers);
1833
1834 *(uint32_t *)(signature + 0) = registers[1];
1835 *(uint32_t *)(signature + 4) = registers[2];
1836 *(uint32_t *)(signature + 8) = registers[3];
1837
1838 if (strncmp("VMwareVMware", signature, 12) == 0) {
1839 Abstract_VM_Version::_detected_virtualization = VMWare;
1840 // check for extended metrics from guestlib
1841 VirtualizationSupport::initialize();
1842 }
1843
1844 if (strncmp("Microsoft Hv", signature, 12) == 0) {
1845 Abstract_VM_Version::_detected_virtualization = HyperV;
1846 }
1847
1848 if (strncmp("KVMKVMKVM", signature, 9) == 0) {
1849 Abstract_VM_Version::_detected_virtualization = KVM;
1850 }
1851
1852 if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
1853 Abstract_VM_Version::_detected_virtualization = XenHVM;
1854 }
1855 }
1856 #endif
1857 }
1858
1859 void VM_Version::initialize() {
1860 ResourceMark rm;
1861 // Making this stub must be FIRST use of assembler
1862
1863 stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
1864 if (stub_blob == NULL) {
1865 vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
1866 }
1867 CodeBuffer c(stub_blob);
1868 VM_Version_StubGenerator g(&c);
1869 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
1870 g.generate_get_cpu_info());
1871
1872 get_processor_features();
1873
1874 LP64_ONLY(Assembler::precompute_instructions();)
1875
1876 if (cpu_family() > 4) { // it supports CPUID
1877 check_virtualizations();
1878 }
1879 }
|
33 #include "runtime/os.hpp"
34 #include "runtime/stubCodeGenerator.hpp"
35 #include "runtime/vm_version.hpp"
36 #include "utilities/powerOfTwo.hpp"
37 #include "utilities/virtualizationSupport.hpp"
38
39 #include OS_HEADER_INLINE(os)
40
// Static state of VM_Version; populated by get_processor_features() from the
// raw CPUID data gathered by the generated stub below.
int VM_Version::_cpu;       // extended CPU family (set from extended_cpu_family())
int VM_Version::_model;     // extended CPU model (set from extended_cpu_model())
int VM_Version::_stepping;  // CPU stepping (set from cpu_stepping())
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

// Blob holding both generated stubs (CPU info and virtualization detection),
// and its capacity in bytes.
static BufferBlob* stub_blob;
static const int stub_size = 2000;

extern "C" {
  // Fills the CpuidInfo struct passed in with raw CPUID data.
  typedef void (*get_cpu_info_stub_t)(void*);
  // Runs CPUID for the given leaf and stores eax..edx into the array.
  typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
static detect_virt_stub_t detect_virt_stub = NULL;
61
62
63 class VM_Version_StubGenerator: public StubCodeGenerator {
64 public:
65
66 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
67
68 address generate_get_cpu_info() {
69 // Flags to test CPU type.
70 const uint32_t HS_EFL_AC = 0x40000;
71 const uint32_t HS_EFL_ID = 0x200000;
72 // Values for when we don't have a CPUID instruction.
73 const int CPU_FAMILY_SHIFT = 8;
74 const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
75 const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
76 bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
77
78 Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
79 Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
80 Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
553 return start;
554 };
  // Emit a vzeroupper into the stub unless it must be skipped on this CPU:
  // control falls through to vzeroupper_uncached() only for GenuineIntel
  // parts whose masked family/model is not one of the Xeon Phi ids checked
  // below (presumably because vzeroupper is undesirable there -- TODO
  // confirm); everything else jumps to L_wrapup with nothing emitted.
  void generate_vzeroupper(Label& L_wrapup) {
#define __ _masm->
    // std_cpuid0 word at offset 4 == 'uneG' identifies a GenuineIntel CPU.
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    // Mask the first dword of the saved std_cpuid1 block (presumably eax:
    // family/model/stepping -- TODO confirm) down to the id bits compared below.
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    // vzeroupper() will use a pre-computed instruction sequence that we
    // can't compute until after we've determined CPU capabilities. Use
    // uncached variant here directly to be able to bootstrap correctly
    __ vzeroupper_uncached();
#undef __
  }
  // Generate the stub behind detect_virt_stub_t: execute CPUID for the
  // requested leaf (first argument) and store eax/ebx/ecx/edx into the
  // caller-supplied 4-element uint32_t array (second argument).
  address generate_detect_virt() {
    StubCodeMark mark(this, "VM_Version", "detect_virt_stub");
#define __ _masm->

    address start = __ pc();

    // Evacuate callee-saved registers
    __ push(rbp);
    __ push(rbx);
    __ push(rsi); // for Windows
#ifdef _LP64
    __ mov(rax, c_rarg0); // CPUID leaf
    __ mov(rsi, c_rarg1); // register array address (eax, ebx, ecx, edx)
#else
    // 32-bit: the three register pushes above (12 bytes) plus the return
    // address put the first argument at rsp+16 and the second at rsp+20.
    __ movptr(rax, Address(rsp, 16)); // CPUID leaf
    __ movptr(rsi, Address(rsp, 20)); // register array address
#endif

    __ cpuid();

    // Store result to register array
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    // Epilogue
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#undef __

    return start;
  };
609 };
610
611 void VM_Version::get_processor_features() {
612
613 _cpu = 4; // 486 by default
614 _model = 0;
615 _stepping = 0;
616 _features = 0;
617 _logical_processors_per_package = 1;
618 // i486 internal cache is both I&D and has a 16-byte line size
619 _L1_data_cache_line_size = 16;
620
621 // Get raw processor info
622
623 get_cpu_info_stub(&_cpuid_info);
624
625 assert_is_initialized();
626 _cpu = extended_cpu_family();
627 _model = extended_cpu_model();
628 _stepping = cpu_stepping();
1692 if (PrefetchFieldsAhead > 0) {
1693 log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
1694 }
1695 if (ContendedPaddingWidth > 0) {
1696 log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
1697 }
1698 }
1699 #endif // !PRODUCT
1700 }
1701
1702 void VM_Version::print_platform_virtualization_info(outputStream* st) {
1703 VirtualizationType vrt = VM_Version::get_detected_virtualization();
1704 if (vrt == XenHVM) {
1705 st->print_cr("Xen hardware-assisted virtualization detected");
1706 } else if (vrt == KVM) {
1707 st->print_cr("KVM virtualization detected");
1708 } else if (vrt == VMWare) {
1709 st->print_cr("VMWare virtualization detected");
1710 VirtualizationSupport::print_virtualization_info(st);
1711 } else if (vrt == HyperV) {
1712 st->print_cr("Hyper-V virtualization detected");
1713 } else if (vrt == HyperVRole) {
1714 st->print_cr("Hyper-V role detected");
1715 }
1716 }
1717
1718 bool VM_Version::use_biased_locking() {
1719 #if INCLUDE_RTM_OPT
1720 // RTM locking is most useful when there is high lock contention and
1721 // low data contention. With high lock contention the lock is usually
1722 // inflated and biased locking is not suitable for that case.
1723 // RTM locking code requires that biased locking is off.
1724 // Note: we can't switch off UseBiasedLocking in get_processor_features()
1725 // because it is used by Thread::allocate() which is called before
1726 // VM_Version::initialize().
1727 if (UseRTMLocking && UseBiasedLocking) {
1728 if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
1729 FLAG_SET_DEFAULT(UseBiasedLocking, false);
1730 } else {
1731 warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
1732 UseBiasedLocking = false;
1733 }
1734 }
1735 #endif
1736 return UseBiasedLocking;
1737 }
1798 // If we are running on another intel machine not recognized in the table, we are okay.
1799 return false;
1800 }
1801 }
1802
1803 // On Xen, the cpuid instruction returns
1804 // eax / registers[0]: Version of Xen
1805 // ebx / registers[1]: chars 'XenV'
1806 // ecx / registers[2]: chars 'MMXe'
1807 // edx / registers[3]: chars 'nVMM'
1808 //
1809 // On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
1810 // ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
1811 // ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
1812 // edx / registers[3]: chars 'M' / 'ware' / 't Hv'
1813 //
// more information:
1815 // https://kb.vmware.com/s/article/1009458
1816 //
1817 void VM_Version::check_virtualizations() {
1818 uint32_t registers[4] = {0};
1819 char signature[13] = {0};
1820
1821 // In case of Xen, it might be appear until 0x40010000.
1822 // https://lists.linuxfoundation.org/pipermail/virtualization/2012-May/019974.html
1823 for (int leaf = 0x40000000; leaf < 0x40010000; leaf += 0x100) {
1824 detect_virt_stub(leaf, registers);
1825 memcpy(signature, ®isters[1], 12);
1826
1827 if (strncmp("VMwareVMware", signature, 12) == 0) {
1828 Abstract_VM_Version::_detected_virtualization = VMWare;
1829 // check for extended metrics from guestlib
1830 VirtualizationSupport::initialize();
1831 } else if (strncmp("Microsoft Hv", signature, 12) == 0) {
1832 Abstract_VM_Version::_detected_virtualization = HyperV;
1833 #ifdef _WINDOWS
1834 // CPUID leaf 0x40000007 is available to the root partition only.
1835 // See Hypervisor Top Level Functional Specification section 2.4.8 for more details.
1836 // https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/master/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v6.0b.pdf
1837 detect_virt_stub(0x40000007, registers);
1838 if ((registers[0] != 0x0) ||
1839 (registers[1] != 0x0) ||
1840 (registers[2] != 0x0) ||
1841 (registers[3] != 0x0)) {
1842 Abstract_VM_Version::_detected_virtualization = HyperVRole;
1843 }
1844 #endif
1845 } else if (strncmp("KVMKVMKVM", signature, 9) == 0) {
1846 Abstract_VM_Version::_detected_virtualization = KVM;
1847 } else if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
1848 Abstract_VM_Version::_detected_virtualization = XenHVM;
1849 }
1850 }
1851 }
1852
1853 void VM_Version::initialize() {
1854 ResourceMark rm;
1855 // Making this stub must be FIRST use of assembler
1856 stub_blob = BufferBlob::create("VM_Version stub", stub_size);
1857 if (stub_blob == NULL) {
1858 vm_exit_during_initialization("Unable to allocate stub for VM_Version");
1859 }
1860 CodeBuffer c(stub_blob);
1861 VM_Version_StubGenerator g(&c);
1862
1863 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
1864 g.generate_get_cpu_info());
1865 detect_virt_stub = CAST_TO_FN_PTR(detect_virt_stub_t,
1866 g.generate_detect_virt());
1867
1868 get_processor_features();
1869
1870 LP64_ONLY(Assembler::precompute_instructions();)
1871
1872 if (VM_Version::supports_hv()) { // Supports hypervisor
1873 check_virtualizations();
1874 }
1875 }
|