// NOTE(review): fragment of a larger guarded region — the "#if" matching the
// "#else" at 767 is above this excerpt (presumably an RTM-support /
// INCLUDE_RTM_OPT guard; TODO confirm against the full file). The leading
// numbers (763...) are original-file line numbers embedded by the extraction,
// not code.
763 if (PrintPreciseRTMLockingStatistics) {
// RTM statistics printing is forced back to its default (off) here; the
// enabling condition sits in the unseen lines above.
764 FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
765 }
766 }
767 #else
// Build without RTM optimization support (guard not visible here): requesting
// RTM locking is a hard error rather than a silent ignore, because — per the
// comment below — UseRTMLocking already influenced UseBiasedLocking during
// argument processing, so continuing would leave inconsistent flag state.
768 if (UseRTMLocking) {
769 // Only C2 does RTM locking optimization.
770 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
771 // setting during arguments processing. See use_biased_locking().
772 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
773 }
774 #endif
775
// C2-only tuning: FPU spilling and SIMD vector-width clamping. The matching
// "#endif" for this "#ifdef COMPILER2" lies below this excerpt. Leading
// numbers (776...) are embedded original-file line numbers, not code.
776 #ifdef COMPILER2
// UseFPUForSpilling requires SSE2; silently fall back to the default (off)
// on older hardware.
777 if (UseFPUForSpilling) {
778 if (UseSSE < 2) {
779 // Only supported with SSE2+
780 FLAG_SET_DEFAULT(UseFPUForSpilling, false);
781 }
782 }
// Clamp MaxVectorSize to what the CPU/OS combination can actually support.
// The checks are ordered strongest-cap-last: power-of-2 normalization, then
// the 64-byte architectural max, then 16 bytes without usable AVX, then 0
// (no vectors) without SSE2.
783 if (MaxVectorSize > 0) {
784 if (!is_power_of_2(MaxVectorSize)) {
785 warning("MaxVectorSize must be a power of 2");
786 FLAG_SET_DEFAULT(MaxVectorSize, 64);
787 }
788 if (MaxVectorSize > 64) {
789 FLAG_SET_DEFAULT(MaxVectorSize, 64);
790 }
// os_supports_avx_vectors() guards against an OS that does not preserve the
// full YMM state even when the CPU has AVX.
791 if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
792 // 32 bytes vectors (in YMM) are only supported with AVX+
793 FLAG_SET_DEFAULT(MaxVectorSize, 16);
794 }
795 if (UseSSE < 2) {
796 // Vectors (in XMM) are only supported with SSE2+
797 FLAG_SET_DEFAULT(MaxVectorSize, 0);
798 }
// Debug-build-only diagnostic: dump the sampled YMM register bytes
// (_cpuid_info.ymm_save, presumably captured around a signal — the message
// text says "after signal handle"; TODO confirm capture site).
799 #ifdef ASSERT
800 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
801 tty->print_cr("State of YMM registers after signal handle:");
// 2 registers sampled on 32-bit, 4 on 64-bit (LP64_ONLY adds the +2),
// matching the four entries of ymm_name.
802 int nreg = 2 LP64_ONLY(+2);
803 const char* ymm_name[4] = {"0", "7", "8", "15"};
804 for (int i = 0; i < nreg; i++) {
805 tty->print("YMM%s:", ymm_name[i]);
// Each register is stored as 8 words; printed high-to-low.
806 for (int j = 7; j >=0; j--) {
807 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
808 }
809 tty->cr();
810 }
811 }
812 #endif
813 }
814
// Big-integer intrinsics (multiplyToLen, squareToLen, mulAdd, Montgomery
// multiply/square) are implemented only for 64-bit x86: enable them by
// default on _LP64, warn on 32-bit if the user asked for one explicitly.
// FLAG_IS_DEFAULT guards preserve any explicit user setting.
// NOTE(review): this fragment ends mid-statement — the closing braces of the
// "if" at 832/833 are below the excerpt. Leading numbers (815...) are
// embedded original-file line numbers, not code.
815 #ifdef _LP64
816 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
817 UseMultiplyToLenIntrinsic = true;
818 }
819 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
820 UseSquareToLenIntrinsic = true;
821 }
822 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
823 UseMulAddIntrinsic = true;
824 }
825 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
826 UseMontgomeryMultiplyIntrinsic = true;
827 }
828 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
829 UseMontgomerySquareIntrinsic = true;
830 }
831 #else
// 32-bit build: the intrinsic is unavailable; only an explicit (non-default)
// request triggers a user-visible warning.
832 if (UseMultiplyToLenIntrinsic) {
833 if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
834 warning("multiplyToLen intrinsic is not available in 32-bit VM");
// ==== Below: a second, revised copy of the same hunk (lines 763-838).
// ==== It differs from the copy above by adding INCLUDE_JVMCI / COMPILER2
// ==== preprocessor guards around the vector-size and intrinsic sections.
// NOTE(review): fragment of a larger guarded region — the "#if" matching the
// "#else" at 767 is above this excerpt (presumably an RTM-support /
// INCLUDE_RTM_OPT guard; TODO confirm against the full file). Leading numbers
// (763...) are embedded original-file line numbers, not code.
763 if (PrintPreciseRTMLockingStatistics) {
// RTM statistics printing is forced back to its default (off) here; the
// enabling condition sits in the unseen lines above.
764 FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
765 }
766 }
767 #else
// Build without RTM optimization support (guard not visible here): requesting
// RTM locking is a hard error rather than a silent ignore, because — per the
// comment below — UseRTMLocking already influenced UseBiasedLocking during
// argument processing, so continuing would leave inconsistent flag state.
768 if (UseRTMLocking) {
769 // Only C2 does RTM locking optimization.
770 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
771 // setting during arguments processing. See use_biased_locking().
772 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
773 }
774 #endif
775
// Revised copy of the vector-setup hunk: UseFPUForSpilling stays C2-only
// (its region is now closed by the "#endif" at 783), while MaxVectorSize
// clamping is widened to also run for JVMCI builds via
// "#if defined(COMPILER2) || INCLUDE_JVMCI". Leading numbers (776...) are
// embedded original-file line numbers, not code.
776 #ifdef COMPILER2
// UseFPUForSpilling requires SSE2; silently fall back to the default (off)
// on older hardware.
777 if (UseFPUForSpilling) {
778 if (UseSSE < 2) {
779 // Only supported with SSE2+
780 FLAG_SET_DEFAULT(UseFPUForSpilling, false);
781 }
782 }
783 #endif
// Vector-width clamping now applies to any vectorizing compiler (C2 or a
// JVMCI compiler), not just C2 as in the earlier revision of this hunk.
784 #if defined(COMPILER2) || INCLUDE_JVMCI
785 if (MaxVectorSize > 0) {
786 if (!is_power_of_2(MaxVectorSize)) {
787 warning("MaxVectorSize must be a power of 2");
788 FLAG_SET_DEFAULT(MaxVectorSize, 64);
789 }
790 if (MaxVectorSize > 64) {
791 FLAG_SET_DEFAULT(MaxVectorSize, 64);
792 }
// os_supports_avx_vectors() guards against an OS that does not preserve the
// full YMM state even when the CPU has AVX.
793 if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
794 // 32 bytes vectors (in YMM) are only supported with AVX+
795 FLAG_SET_DEFAULT(MaxVectorSize, 16);
796 }
797 if (UseSSE < 2) {
798 // Vectors (in XMM) are only supported with SSE2+
799 FLAG_SET_DEFAULT(MaxVectorSize, 0);
800 }
// Debug diagnostic, restricted to C2 debug builds (TraceNewVectors is
// presumably a C2-only flag; TODO confirm): dump the sampled YMM bytes from
// _cpuid_info.ymm_save — the message text says "after signal handle".
801 #if defined(COMPILER2) && defined(ASSERT)
802 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
803 tty->print_cr("State of YMM registers after signal handle:");
// 2 registers sampled on 32-bit, 4 on 64-bit (LP64_ONLY adds the +2),
// matching the four entries of ymm_name.
804 int nreg = 2 LP64_ONLY(+2);
805 const char* ymm_name[4] = {"0", "7", "8", "15"};
806 for (int i = 0; i < nreg; i++) {
807 tty->print("YMM%s:", ymm_name[i]);
// Each register is stored as 8 words; printed high-to-low.
808 for (int j = 7; j >=0; j--) {
809 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
810 }
811 tty->cr();
812 }
813 }
814 #endif
815 }
816 #endif
817
// Revised copy of the big-integer-intrinsics hunk: now explicitly wrapped in
// "#ifdef COMPILER2" (818) in addition to the _LP64 split — these intrinsics
// are implemented only for 64-bit x86 and used only by C2. FLAG_IS_DEFAULT
// guards preserve any explicit user setting.
// NOTE(review): this fragment ends mid-statement — the closing braces of the
// "if" at 836/837 are below the excerpt. Leading numbers (818...) are
// embedded original-file line numbers, not code.
818 #ifdef COMPILER2
819 #ifdef _LP64
820 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
821 UseMultiplyToLenIntrinsic = true;
822 }
823 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
824 UseSquareToLenIntrinsic = true;
825 }
826 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
827 UseMulAddIntrinsic = true;
828 }
829 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
830 UseMontgomeryMultiplyIntrinsic = true;
831 }
832 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
833 UseMontgomerySquareIntrinsic = true;
834 }
835 #else
// 32-bit build: the intrinsic is unavailable; only an explicit (non-default)
// request triggers a user-visible warning.
836 if (UseMultiplyToLenIntrinsic) {
837 if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
838 warning("multiplyToLen intrinsic is not available in 32-bit VM");
|