
src/hotspot/cpu/x86/globals_x86.hpp


*** 87,223 ****
  define_pd_global(bool, PreserveFramePointer, false);
  define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
! #define ARCH_FLAGS(develop, \
!                    product, \
!                    diagnostic, \
!                    experimental, \
!                    notproduct, \
!                    range, \
!                    constraint) \
! \
!   develop(bool, IEEEPrecision, true, \
!           "Enables IEEE precision (for INTEL only)") \
! \
!   product(bool, UseStoreImmI16, true, \
!           "Use store immediate 16-bits value instruction on x86") \
! \
!   product(intx, UseSSE, 99, \
!           "Highest supported SSE instructions set on x86/x64") \
!           range(0, 99) \
! \
!   product(intx, UseAVX, 3, \
!           "Highest supported AVX instructions set on x86/x64") \
!           range(0, 99) \
! \
!   product(bool, UseCLMUL, false, \
!           "Control whether CLMUL instructions can be used on x86/x64") \
! \
!   diagnostic(bool, UseIncDec, true, \
!           "Use INC, DEC instructions on x86") \
! \
!   product(bool, UseNewLongLShift, false, \
!           "Use optimized bitwise shift left") \
! \
!   product(bool, UseAddressNop, false, \
!           "Use '0F 1F [addr]' NOP instructions on x86 cpus") \
! \
!   product(bool, UseXmmLoadAndClearUpper, true, \
!           "Load low part of XMM register and clear upper part") \
! \
!   product(bool, UseXmmRegToRegMoveAll, false, \
!           "Copy all XMM register bits when moving value between registers") \
! \
!   product(bool, UseXmmI2D, false, \
!           "Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
! \
!   product(bool, UseXmmI2F, false, \
!           "Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
! \
!   product(bool, UseUnalignedLoadStores, false, \
!           "Use SSE2 MOVDQU instruction for Arraycopy") \
! \
!   product(bool, UseXMMForObjInit, false, \
!           "Use XMM/YMM MOVDQU instruction for Object Initialization") \
! \
!   product(bool, UseFastStosb, false, \
!           "Use fast-string operation for zeroing: rep stosb") \
! \
!   /* Use Restricted Transactional Memory for lock eliding */ \
!   product(bool, UseRTMLocking, false, \
!           "Enable RTM lock eliding for inflated locks in compiled code") \
! \
!   experimental(bool, UseRTMForStackLocks, false, \
!           "Enable RTM lock eliding for stack locks in compiled code") \
! \
!   product(bool, UseRTMDeopt, false, \
!           "Perform deopt and recompilation based on RTM abort ratio") \
! \
!   product(int, RTMRetryCount, 5, \
!           "Number of RTM retries on lock abort or busy") \
!           range(0, max_jint) \
! \
!   experimental(int, RTMSpinLoopCount, 100, \
!           "Spin count for lock to become free before RTM retry") \
!           range(0, max_jint) \
! \
!   experimental(int, RTMAbortThreshold, 1000, \
!           "Calculate abort ratio after this number of aborts") \
!           range(0, max_jint) \
! \
!   experimental(int, RTMLockingThreshold, 10000, \
!           "Lock count at which to do RTM lock eliding without " \
!           "abort ratio calculation") \
!           range(0, max_jint) \
! \
!   experimental(int, RTMAbortRatio, 50, \
!           "Lock abort ratio at which to stop use RTM lock eliding") \
!           range(0, 100) /* natural range */ \
! \
!   experimental(int, RTMTotalCountIncrRate, 64, \
!           "Increment total RTM attempted lock count once every n times") \
!           range(1, max_jint) \
!           constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo) \
! \
!   experimental(intx, RTMLockingCalculationDelay, 0, \
!           "Number of milliseconds to wait before start calculating aborts " \
!           "for RTM locking") \
! \
!   experimental(bool, UseRTMXendForLockBusy, true, \
!           "Use RTM Xend instead of Xabort when lock busy") \
! \
!   /* assembler */ \
!   product(bool, UseCountLeadingZerosInstruction, false, \
!           "Use count leading zeros instruction") \
! \
!   product(bool, UseCountTrailingZerosInstruction, false, \
!           "Use count trailing zeros instruction") \
! \
!   product(bool, UseSSE42Intrinsics, false, \
!           "SSE4.2 versions of intrinsics") \
! \
!   product(bool, UseBMI1Instructions, false, \
!           "Use BMI1 instructions") \
! \
!   product(bool, UseBMI2Instructions, false, \
!           "Use BMI2 instructions") \
! \
!   diagnostic(bool, UseLibmIntrinsic, true, \
!           "Use Libm Intrinsics") \
! \
!   /* Minimum array size in bytes to use AVX512 intrinsics */ \
!   /* for copy, inflate and fill which don't bail out early based on any */ \
!   /* condition. When this value is set to zero compare operations like */ \
!   /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
!   diagnostic(int, AVX3Threshold, 4096, \
!           "Minimum array size in bytes to use AVX512 intrinsics" \
!           "for copy, inflate and fill. When this value is set as zero" \
!           "compare operations can also use AVX512 intrinsics.") \
!           range(0, max_jint) \
! \
!   diagnostic(bool, IntelJccErratumMitigation, true, \
!           "Turn off JVM mitigations related to Intel micro code " \
!           "mitigations for the Intel JCC erratum")
  #endif // CPU_X86_GLOBALS_X86_HPP
--- 87,218 ----
  define_pd_global(bool, PreserveFramePointer, false);
  define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
! #include "runtime/flags/jvmFlag.hpp"
! DEVELOP_FLAG(bool, IEEEPrecision, true, JVMFlag::DEFAULT,
!         "Enables IEEE precision (for INTEL only)");
+ PRODUCT_FLAG(bool, UseStoreImmI16, true, JVMFlag::DEFAULT,
+         "Use store immediate 16-bits value instruction on x86");
+ 
+ PRODUCT_FLAG(intx, UseSSE, 99, JVMFlag::RANGE,
+         "Highest supported SSE instructions set on x86/x64");
+ FLAG_RANGE( UseSSE, 0, 99);
+ 
+ PRODUCT_FLAG(intx, UseAVX, 3, JVMFlag::RANGE,
+         "Highest supported AVX instructions set on x86/x64");
+ FLAG_RANGE( UseAVX, 0, 99);
+ 
+ PRODUCT_FLAG(bool, UseCLMUL, false, JVMFlag::DEFAULT,
+         "Control whether CLMUL instructions can be used on x86/x64");
+ 
+ PRODUCT_FLAG(bool, UseIncDec, true, JVMFlag::DIAGNOSTIC,
+         "Use INC, DEC instructions on x86");
+ 
+ PRODUCT_FLAG(bool, UseNewLongLShift, false, JVMFlag::DEFAULT,
+         "Use optimized bitwise shift left");
+ 
+ PRODUCT_FLAG(bool, UseAddressNop, false, JVMFlag::DEFAULT,
+         "Use '0F 1F [addr]' NOP instructions on x86 cpus");
+ 
+ PRODUCT_FLAG(bool, UseXmmLoadAndClearUpper, true, JVMFlag::DEFAULT,
+         "Load low part of XMM register and clear upper part");
+ 
+ PRODUCT_FLAG(bool, UseXmmRegToRegMoveAll, false, JVMFlag::DEFAULT,
+         "Copy all XMM register bits when moving value between registers");
+ 
+ PRODUCT_FLAG(bool, UseXmmI2D, false, JVMFlag::DEFAULT,
+         "Use SSE2 CVTDQ2PD instruction to convert Integer to Double");
+ 
+ PRODUCT_FLAG(bool, UseXmmI2F, false, JVMFlag::DEFAULT,
+         "Use SSE2 CVTDQ2PS instruction to convert Integer to Float");
+ 
+ PRODUCT_FLAG(bool, UseUnalignedLoadStores, false, JVMFlag::DEFAULT,
+         "Use SSE2 MOVDQU instruction for Arraycopy");
+ 
+ PRODUCT_FLAG(bool, UseXMMForObjInit, false, JVMFlag::DEFAULT,
+         "Use XMM/YMM MOVDQU instruction for Object Initialization");
+ 
+ PRODUCT_FLAG(bool, UseFastStosb, false, JVMFlag::DEFAULT,
+         "Use fast-string operation for zeroing: rep stosb");
+ 
+ 
+ // Use Restricted Transactional Memory for lock eliding
+ PRODUCT_FLAG(bool, UseRTMLocking, false, JVMFlag::DEFAULT,
+         "Enable RTM lock eliding for inflated locks in compiled code");
+ 
+ PRODUCT_FLAG(bool, UseRTMForStackLocks, false, JVMFlag::EXPERIMENTAL,
+         "Enable RTM lock eliding for stack locks in compiled code");
+ 
+ PRODUCT_FLAG(bool, UseRTMDeopt, false, JVMFlag::DEFAULT,
+         "Perform deopt and recompilation based on RTM abort ratio");
+ 
+ PRODUCT_FLAG(int, RTMRetryCount, 5, JVMFlag::RANGE,
+         "Number of RTM retries on lock abort or busy");
+ FLAG_RANGE( RTMRetryCount, 0, max_jint);
+ 
+ PRODUCT_FLAG(int, RTMSpinLoopCount, 100, JVMFlag::EXPERIMENTAL | JVMFlag::RANGE,
+         "Spin count for lock to become free before RTM retry");
+ FLAG_RANGE( RTMSpinLoopCount, 0, max_jint);
+ 
+ PRODUCT_FLAG(int, RTMAbortThreshold, 1000, JVMFlag::EXPERIMENTAL | JVMFlag::RANGE,
+         "Calculate abort ratio after this number of aborts");
+ FLAG_RANGE( RTMAbortThreshold, 0, max_jint);
+ 
+ PRODUCT_FLAG(int, RTMLockingThreshold, 10000, JVMFlag::EXPERIMENTAL | JVMFlag::RANGE,
+         "Lock count at which to do RTM lock eliding without "
+         "abort ratio calculation");
+ FLAG_RANGE( RTMLockingThreshold, 0, max_jint);
+ 
+ PRODUCT_FLAG(int, RTMAbortRatio, 50, JVMFlag::EXPERIMENTAL | JVMFlag::RANGE,
+         "Lock abort ratio at which to stop use RTM lock eliding");
+ FLAG_RANGE( RTMAbortRatio, 0, 100);
+ 
+ PRODUCT_FLAG(int, RTMTotalCountIncrRate, 64, JVMFlag::EXPERIMENTAL | JVMFlag::RANGE | JVMFlag::CONSTRAINT,
+         "Increment total RTM attempted lock count once every n times");
+ FLAG_RANGE( RTMTotalCountIncrRate, 1, max_jint);
+ FLAG_CONSTRAINT( RTMTotalCountIncrRate, (void*)RTMTotalCountIncrRateConstraintFunc, JVMFlag::AfterErgo);
+ 
+ PRODUCT_FLAG(intx, RTMLockingCalculationDelay, 0, JVMFlag::EXPERIMENTAL,
+         "Number of milliseconds to wait before start calculating aborts "
+         "for RTM locking");
+ 
+ PRODUCT_FLAG(bool, UseRTMXendForLockBusy, true, JVMFlag::EXPERIMENTAL,
+         "Use RTM Xend instead of Xabort when lock busy");
+ 
+ 
+ // assembler
+ PRODUCT_FLAG(bool, UseCountLeadingZerosInstruction, false, JVMFlag::DEFAULT,
+         "Use count leading zeros instruction");
+ 
+ PRODUCT_FLAG(bool, UseCountTrailingZerosInstruction, false, JVMFlag::DEFAULT,
+         "Use count trailing zeros instruction");
+ 
+ PRODUCT_FLAG(bool, UseSSE42Intrinsics, false, JVMFlag::DEFAULT,
+         "SSE4.2 versions of intrinsics");
+ 
+ PRODUCT_FLAG(bool, UseBMI1Instructions, false, JVMFlag::DEFAULT,
+         "Use BMI1 instructions");
+ 
+ PRODUCT_FLAG(bool, UseBMI2Instructions, false, JVMFlag::DEFAULT,
+         "Use BMI2 instructions");
+ 
+ PRODUCT_FLAG(bool, UseLibmIntrinsic, true, JVMFlag::DIAGNOSTIC,
+         "Use Libm Intrinsics");
+ 
+ 
+ // Minimum array size in bytes to use AVX512 intrinsics
+ // for copy, inflate and fill which don't bail out early based on any
+ // condition. When this value is set to zero compare operations like
+ // compare, vectorizedMismatch, compress can also use AVX512 intrinsics.
+ PRODUCT_FLAG(int, AVX3Threshold, 4096, JVMFlag::DIAGNOSTIC | JVMFlag::RANGE,
+         "Minimum array size in bytes to use AVX512 intrinsics"
+         "for copy, inflate and fill. When this value is set as zero"
+         "compare operations can also use AVX512 intrinsics.");
+ FLAG_RANGE( AVX3Threshold, 0, max_jint);
+ 
+ PRODUCT_FLAG(bool, IntelJccErratumMitigation, true, JVMFlag::DIAGNOSTIC,
+         "Turn off JVM mitigations related to Intel micro code "
+         "mitigations for the Intel JCC erratum");
  #endif // CPU_X86_GLOBALS_X86_HPP
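
Note: the hunk above replaces the single ARCH_FLAGS(...) macro table with one
declaration per flag. As a minimal sketch of the pattern change, shown for the
UseSSE flag and using only identifiers that appear in this patch (the
PRODUCT_FLAG/FLAG_RANGE macros are assumed to come from the newly included
runtime/flags/jvmFlag.hpp):

    // Old style: one entry inside the ARCH_FLAGS macro table; the range is a
    // clause expanded by whichever macro the table is instantiated with.
    //   product(intx, UseSSE, 99,
    //           "Highest supported SSE instructions set on x86/x64")
    //           range(0, 99)

    // New style (this patch): a standalone declaration per flag, with the
    // flag kind expressed as a JVMFlag attribute and the range declared as a
    // separate statement.
    PRODUCT_FLAG(intx, UseSSE, 99, JVMFlag::RANGE,
            "Highest supported SSE instructions set on x86/x64");
    FLAG_RANGE( UseSSE, 0, 99);

Attributes that were previously implied by the choice of table macro
(diagnostic, experimental, range, constraint) become OR'ed bits in the fourth
argument, e.g. JVMFlag::EXPERIMENTAL | JVMFlag::RANGE for RTMSpinLoopCount.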