src/cpu/x86/vm/vm_version_x86.cpp
Cdiff for 8037226

*** 48,59 ****
  int VM_Version::_stepping;
  int VM_Version::_cpuFeatures;
  const char* VM_Version::_features_str = "";
  VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

  static BufferBlob* stub_blob;
! static const int stub_size = 550;

  extern "C" {
    typedef void (*getPsrInfo_stub_t)(void*);
  }
  static getPsrInfo_stub_t getPsrInfo_stub = NULL;
--- 48,64 ----
  int VM_Version::_stepping;
  int VM_Version::_cpuFeatures;
  const char* VM_Version::_features_str = "";
  VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
+ // Address of instruction which causes SEGV
+ address VM_Version::_cpuinfo_segv_addr = 0;
+ // Address of instruction after the one which causes SEGV
+ address VM_Version::_cpuinfo_cont_addr = 0;

  static BufferBlob* stub_blob;
! static const int stub_size = 600;

  extern "C" {
    typedef void (*getPsrInfo_stub_t)(void*);
  }
  static getPsrInfo_stub_t getPsrInfo_stub = NULL;
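The two addresses recorded above are consumed by the platform signal handlers: the detection stub (next hunk) deliberately dereferences NULL, and the handler must recognize that faulting pc and resume at the continuation address instead of reporting a crash. A minimal sketch of that handler-side check, assuming accessors is_cpuinfo_segv_addr() and cpuinfo_cont_addr() that wrap the two statics (the real check belongs in the per-OS handlers such as os_linux_x86.cpp, which this webrev does not show):

    // Sketch only; placed in the per-OS SEGV handler before crash handling.
    // is_cpuinfo_segv_addr()/cpuinfo_cont_addr() are assumed accessors for
    // _cpuinfo_segv_addr/_cpuinfo_cont_addr set by the stub.
    if (sig == SIGSEGV && VM_Version::is_cpuinfo_segv_addr(pc)) {
      // The CPU-features stub faulted on purpose; jump past the faulting
      // load so the stub can store the YMM registers for inspection.
      stub = VM_Version::cpuinfo_cont_addr();
    }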
*** 232,254 ****
    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
!   __ andl(rcx, 0x18000000);
    __ cmpl(rcx, 0x18000000);
!   __ jccb(Assembler::notEqual, sef_cpuid);

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
--- 237,300 ----
    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
!   __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
!   __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
+   __ andl(rax, 0x6); // xcr0 bits sse | ymm
+   __ cmpl(rax, 0x6);
+   __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
+
+   //
+   // Some OSs have a bug when upper 128bits of YMM
+   // registers are not restored after a signal processing.
+   // Generate SEGV here (reference through NULL)
+   // and check upper YMM bits after it.
+   //
+   VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
+
+   // load value into all 32 bytes of ymm7 register
+   __ movl(rcx, VM_Version::ymm_test_value());
+
+   __ movdl(xmm0, rcx);
+   __ pshufd(xmm0, xmm0, 0x00);
+   __ vinsertf128h(xmm0, xmm0, xmm0);
+   __ vmovdqu(xmm7, xmm0);
+ #ifdef _LP64
+   __ vmovdqu(xmm8, xmm0);
+   __ vmovdqu(xmm15, xmm0);
+ #endif
+
+   __ xorl(rsi, rsi);
+   VM_Version::set_cpuinfo_segv_addr( __ pc() );
+   // Generate SEGV
+   __ movl(rax, Address(rsi, 0));
+
+   VM_Version::set_cpuinfo_cont_addr( __ pc() );
+   // Returns here after signal. Save xmm0 to check it later.
+   __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
+   __ vmovdqu(Address(rsi, 0), xmm0);
+   __ vmovdqu(Address(rsi, 32), xmm7);
+ #ifdef _LP64
+   __ vmovdqu(Address(rsi, 64), xmm8);
+   __ vmovdqu(Address(rsi, 96), xmm15);
+ #endif
+
+   VM_Version::clean_cpuFeatures();
+
    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
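Note how the test pattern is spread across a full 256-bit register: movdl places ymm_test_value() in the low dword of xmm0, pshufd with shuffle mask 0x00 replicates that dword through the low 128 bits, and vinsertf128h duplicates the low lane into the high lane, so all eight dwords of each tested YMM register are identical before the deliberate fault. The helpers the stub calls are declared elsewhere (vm_version_x86.hpp in this changeset); a plausible shape, with the value and field layout assumed rather than taken from this diff:

    // Sketch only; names mirror the calls in the stub above, the pattern
    // value and the save-area layout are assumptions.
    static int ymm_test_value()  { return 0xCAFEBABE; } // assumed pattern
    static ByteSize ymm_save_offset() {
      return byte_offset_of(CpuidInfo, ymm_save);
    }
    // In CpuidInfo: space for ymm0, ymm7 (+ ymm8, ymm15 on 64-bit),
    // 8 dwords per register.
    uint32_t ymm_save[8*4];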
*** 538,555 ****
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
    if (MaxVectorSize > 32) {
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
!   if (MaxVectorSize > 16 && UseAVX == 0) {
!     // Only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
!     // Only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
  }
  #endif

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
--- 584,615 ----
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
    if (MaxVectorSize > 32) {
      FLAG_SET_DEFAULT(MaxVectorSize, 32);
    }
!   if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
!     // 32 bytes vectors (in YMM) are only supported with AVX+
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
    if (UseSSE < 2) {
!     // Vectors (in XMM) are only supported with SSE2+
      FLAG_SET_DEFAULT(MaxVectorSize, 0);
    }
+ #ifdef ASSERT
+   if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
+     tty->print_cr("State of YMM registers after signal handle:");
+     int nreg = 2 LP64_ONLY(+2);
+     const char* ymm_name[4] = {"0", "7", "8", "15"};
+     for (int i = 0; i < nreg; i++) {
+       tty->print("YMM%s:", ymm_name[i]);
+       for (int j = 7; j >= 0; j--) {
+         tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
+       }
+       tty->cr();
+     }
+   }
+ #endif
  }
  #endif

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
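The new os_supports_avx_vectors() predicate is what connects the stub to this flag logic: 32-byte vectors stay enabled only if every saved dword still holds the test pattern after the signal round-trip. A minimal sketch of that check, assuming the ymm_save layout implied by the ASSERT dump above:

    // Sketch only; assumed to sit with the other feature queries in
    // vm_version_x86.hpp.
    static bool os_supports_avx_vectors() {
      if (!supports_avx()) {
        return false;
      }
      // Verify the OS preserved all bits of the AVX registers across the
      // signal raised by the detection stub.
      int nreg = 2 LP64_ONLY(+2); // ymm0, ymm7 (+ ymm8, ymm15 on 64-bit)
      for (int i = 0; i < 8 * nreg; i++) { // 8 dwords per YMM register
        if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
          return false;
        }
      }
      return true;
    }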
*** 676,693 ****
          UseSSE42Intrinsics = true;
        }
      }
    }
  }
- #if defined(COMPILER2) && defined(_ALLBSD_SOURCE)
-   if (MaxVectorSize > 16) {
-     // Limit vectors size to 16 bytes on BSD until it fixes
-     // restoring upper 128bit of YMM registers on return
-     // from signal handler.
-     FLAG_SET_DEFAULT(MaxVectorSize, 16);
-   }
- #endif // COMPILER2

  // Use count leading zeros count instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
--- 736,745 ----
*** 812,821 ****
--- 864,878 ----
      tty->print("  UseAVX=%d", UseAVX);
    }
    if (UseAES) {
      tty->print("  UseAES=1");
    }
+ #ifdef COMPILER2
+   if (MaxVectorSize > 0) {
+     tty->print("  MaxVectorSize=%d", MaxVectorSize);
+   }
+ #endif
    tty->cr();
    tty->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
      tty->print_cr(": no prefetching");
    } else {