927 FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
928 }
929 }
// NOTE(review): the matching #ifdef for this #else sits above this extract
// (presumably COMPILER2) — confirm against the full file.
930 #else
931 if (UseRTMLocking) {
932 // Only C2 does RTM locking optimization.
933 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
934 // setting during arguments processing. See use_biased_locking().
935 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
936 }
937 #endif
938
// C2 only: spilling through FPU/XMM registers requires SSE2; silently
// fall back to the default when the CPU lacks it.
939 #ifdef COMPILER2
940 if (UseFPUForSpilling) {
941 if (UseSSE < 2) {
942 // Only supported with SSE2+
943 FLAG_SET_DEFAULT(UseFPUForSpilling, false);
944 }
945 }
946 #endif
// Clamp MaxVectorSize to what the detected SSE/AVX level (and OS XSAVE
// support) can actually handle; warn only if the user set it explicitly.
947 #if defined(COMPILER2) || INCLUDE_JVMCI
948 if (MaxVectorSize > 0) {
949 if (!is_power_of_2(MaxVectorSize)) {
950 warning("MaxVectorSize must be a power of 2");
951 FLAG_SET_DEFAULT(MaxVectorSize, 64);
952 }
953 if (UseSSE < 2) {
954 // Vectors (in XMM) are only supported with SSE2+
955 if (MaxVectorSize > 0) {
956 if (!FLAG_IS_DEFAULT(MaxVectorSize))
957 warning("MaxVectorSize must be 0");
958 FLAG_SET_DEFAULT(MaxVectorSize, 0);
959 }
960 }
961 else if (UseAVX == 0 || !os_supports_avx_vectors()) {
962 // 32 bytes vectors (in YMM) are only supported with AVX+
963 if (MaxVectorSize > 16) {
964 if (!FLAG_IS_DEFAULT(MaxVectorSize))
965 warning("MaxVectorSize must be <= 16");
966 FLAG_SET_DEFAULT(MaxVectorSize, 16);
967 }
// NOTE(review): original lines 968-978 are missing from this extract; the
// cap-to-64 statements below appear to be the tail of a truncated
// else-branch (likely the AVX-512 / "MaxVectorSize > 64" case) — verify
// against the complete file before drawing conclusions from this span.
979 if (!FLAG_IS_DEFAULT(MaxVectorSize))
980 warning("MaxVectorSize must be <= 64");
981 FLAG_SET_DEFAULT(MaxVectorSize, 64);
982 }
983 }
// Debug-only: dump the YMM register save area captured earlier by the
// CPUID/signal probe (2 registers on 32-bit, 4 on 64-bit via LP64_ONLY).
984 #if defined(COMPILER2) && defined(ASSERT)
985 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
986 tty->print_cr("State of YMM registers after signal handle:");
987 int nreg = 2 LP64_ONLY(+2);
988 const char* ymm_name[4] = {"0", "7", "8", "15"};
989 for (int i = 0; i < nreg; i++) {
990 tty->print("YMM%s:", ymm_name[i]);
// Print the 8 saved 32-bit words of each YMM register, high word first.
991 for (int j = 7; j >=0; j--) {
992 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
993 }
994 tty->cr();
995 }
996 }
997 #endif // COMPILER2 && ASSERT
998 }
999 #endif // COMPILER2 || INCLUDE_JVMCI
1000
// On 64-bit C2 builds, turn on the BigInteger multiply/square/Montgomery
// intrinsics unless the user overrode them on the command line.
1001 #ifdef COMPILER2
1002 #ifdef _LP64
1003 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
1004 UseMultiplyToLenIntrinsic = true;
1005 }
1006 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
1007 UseSquareToLenIntrinsic = true;
1008 }
1009 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
1010 UseMulAddIntrinsic = true;
1011 }
1012 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
1013 UseMontgomeryMultiplyIntrinsic = true;
1014 }
1015 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
1016 UseMontgomerySquareIntrinsic = true;
1017 }
1018 #else
// NOTE(review): the 32-bit (#else) branch is cut off here — the body of
// this if continues past the end of this extract.
1019 if (UseMultiplyToLenIntrinsic) {
|
927 FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
928 }
929 }
// NOTE(review): the matching #ifdef for this #else sits above this extract
// (presumably COMPILER2) — confirm against the full file.
930 #else
931 if (UseRTMLocking) {
932 // Only C2 does RTM locking optimization.
933 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
934 // setting during arguments processing. See use_biased_locking().
935 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
936 }
937 #endif
938
// C2 only: spilling through FPU/XMM registers requires SSE2; silently
// fall back to the default when the CPU lacks it.
939 #ifdef COMPILER2
940 if (UseFPUForSpilling) {
941 if (UseSSE < 2) {
942 // Only supported with SSE2+
943 FLAG_SET_DEFAULT(UseFPUForSpilling, false);
944 }
945 }
946 #endif
// Clamp MaxVectorSize to what the detected SSE/AVX level (and OS XSAVE
// support) can actually handle; warn only if the user set it explicitly.
// (This revision uses the COMPILER2_OR_JVMCI convenience macro in place of
// the older "defined(COMPILER2) || INCLUDE_JVMCI" spelling.)
947 #if COMPILER2_OR_JVMCI
948 if (MaxVectorSize > 0) {
949 if (!is_power_of_2(MaxVectorSize)) {
950 warning("MaxVectorSize must be a power of 2");
951 FLAG_SET_DEFAULT(MaxVectorSize, 64);
952 }
953 if (UseSSE < 2) {
954 // Vectors (in XMM) are only supported with SSE2+
955 if (MaxVectorSize > 0) {
956 if (!FLAG_IS_DEFAULT(MaxVectorSize))
957 warning("MaxVectorSize must be 0");
958 FLAG_SET_DEFAULT(MaxVectorSize, 0);
959 }
960 }
961 else if (UseAVX == 0 || !os_supports_avx_vectors()) {
962 // 32 bytes vectors (in YMM) are only supported with AVX+
963 if (MaxVectorSize > 16) {
964 if (!FLAG_IS_DEFAULT(MaxVectorSize))
965 warning("MaxVectorSize must be <= 16");
966 FLAG_SET_DEFAULT(MaxVectorSize, 16);
967 }
// NOTE(review): original lines 968-978 are missing from this extract; the
// cap-to-64 statements below appear to be the tail of a truncated
// else-branch (likely the AVX-512 / "MaxVectorSize > 64" case) — verify
// against the complete file before drawing conclusions from this span.
979 if (!FLAG_IS_DEFAULT(MaxVectorSize))
980 warning("MaxVectorSize must be <= 64");
981 FLAG_SET_DEFAULT(MaxVectorSize, 64);
982 }
983 }
// Debug-only: dump the YMM register save area captured earlier by the
// CPUID/signal probe (2 registers on 32-bit, 4 on 64-bit via LP64_ONLY).
984 #if defined(COMPILER2) && defined(ASSERT)
985 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
986 tty->print_cr("State of YMM registers after signal handle:");
987 int nreg = 2 LP64_ONLY(+2);
988 const char* ymm_name[4] = {"0", "7", "8", "15"};
989 for (int i = 0; i < nreg; i++) {
990 tty->print("YMM%s:", ymm_name[i]);
// Print the 8 saved 32-bit words of each YMM register, high word first.
991 for (int j = 7; j >=0; j--) {
992 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
993 }
994 tty->cr();
995 }
996 }
997 #endif // COMPILER2 && ASSERT
998 }
999 #endif // COMPILER2_OR_JVMCI
1000
// On 64-bit C2 builds, turn on the BigInteger multiply/square/Montgomery
// intrinsics unless the user overrode them on the command line.
1001 #ifdef COMPILER2
1002 #ifdef _LP64
1003 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
1004 UseMultiplyToLenIntrinsic = true;
1005 }
1006 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
1007 UseSquareToLenIntrinsic = true;
1008 }
1009 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
1010 UseMulAddIntrinsic = true;
1011 }
1012 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
1013 UseMontgomeryMultiplyIntrinsic = true;
1014 }
1015 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
1016 UseMontgomerySquareIntrinsic = true;
1017 }
1018 #else
// NOTE(review): the 32-bit (#else) branch is cut off here — the body of
// this if continues past the end of this extract.
1019 if (UseMultiplyToLenIntrinsic) {