// NOTE(review): this chunk looks like a pasted fragment of HotSpot's
// VM_Version::get_processor_features() (vm_version_x86.cpp) with the file's
// original line numbers embedded at the start of each line.  The enclosing
// function starts before and ends after this view, so the code is left
// byte-identical and only annotated.  TODO confirm enclosing function.
//
// Debug-only dump of the YMM register state saved by the earlier AVX
// signal-handling check, printed when verbose vector tracing is on.
686 #ifdef ASSERT
687 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
688 tty->print_cr("State of YMM registers after signal handle:");
// Two registers are dumped on 32-bit; LP64_ONLY adds two more on 64-bit,
// matching the four names in ymm_name (0, 7, 8, 15).
689 int nreg = 2 LP64_ONLY(+2);
690 const char* ymm_name[4] = {"0", "7", "8", "15"};
691 for (int i = 0; i < nreg; i++) {
692 tty->print("YMM%s:", ymm_name[i]);
// Each register occupies eight 32-bit words in ymm_save; print high word first.
693 for (int j = 7; j >=0; j--) {
694 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
695 }
696 tty->cr();
697 }
698 }
699 #endif
700 }
701 
// multiplyToLen intrinsic: default-on for 64-bit VMs; the 32-bit VM has no
// implementation, so it is forced off (warn only if the user asked for it).
702 #ifdef _LP64
703 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
704 UseMultiplyToLenIntrinsic = true;
705 }
706 #else
707 if (UseMultiplyToLenIntrinsic) {
708 if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
709 warning("multiplyToLen intrinsic is not available in 32-bit VM");
710 }
711 FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
712 }
713 #endif
714 #endif // COMPILER2
715 
716 // On new cpus instructions which update whole XMM register should be used
717 // to prevent partial register stall due to dependencies on high half.
718 //
719 // UseXmmLoadAndClearUpper == true --> movsd(xmm, mem)
720 // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
721 // UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
722 // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
723 
// AMD-specific flag defaults; this block is cut off at the end of the
// visible fragment (its closing braces lie beyond this view).
724 if( is_amd() ) { // AMD cpus specific settings
725 if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
726 // Use it on new AMD cpus starting from Opteron.
727 UseAddressNop = true;
728 }
729 if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
730 // Use it on new AMD cpus starting from Opteron.
731 UseNewLongLShift = true;
|
// NOTE(review): second copy of the same region as above — this appears to be
// a later revision of the identical vm_version_x86.cpp fragment (presumably a
// before/after paste; verify against the repository history).  The only
// difference is the addition of UseSquareToLenIntrinsic and UseMulAddIntrinsic
// handling, mirroring the UseMultiplyToLenIntrinsic pattern.  Code is left
// byte-identical and only annotated.
//
// Debug-only dump of the YMM register state saved by the earlier AVX
// signal-handling check, printed when verbose vector tracing is on.
686 #ifdef ASSERT
687 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
688 tty->print_cr("State of YMM registers after signal handle:");
// Two registers are dumped on 32-bit; LP64_ONLY adds two more on 64-bit,
// matching the four names in ymm_name (0, 7, 8, 15).
689 int nreg = 2 LP64_ONLY(+2);
690 const char* ymm_name[4] = {"0", "7", "8", "15"};
691 for (int i = 0; i < nreg; i++) {
692 tty->print("YMM%s:", ymm_name[i]);
// Each register occupies eight 32-bit words in ymm_save; print high word first.
693 for (int j = 7; j >=0; j--) {
694 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
695 }
696 tty->cr();
697 }
698 }
699 #endif
700 }
701 
// BigInteger intrinsics (multiplyToLen / squareToLen / mulAdd): default-on
// for 64-bit VMs; the 32-bit VM has no implementations, so each is forced
// off (warn only if the user explicitly requested it).
702 #ifdef _LP64
703 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
704 UseMultiplyToLenIntrinsic = true;
705 }
706 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
707 UseSquareToLenIntrinsic = true;
708 }
709 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
710 UseMulAddIntrinsic = true;
711 }
712 #else
713 if (UseMultiplyToLenIntrinsic) {
714 if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
715 warning("multiplyToLen intrinsic is not available in 32-bit VM");
716 }
717 FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
718 }
719 if (UseSquareToLenIntrinsic) {
720 if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
721 warning("squareToLen intrinsic is not available in 32-bit VM");
722 }
723 FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
724 }
725 if (UseMulAddIntrinsic) {
726 if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
727 warning("mulAdd intrinsic is not available in 32-bit VM");
728 }
729 FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
730 }
731 #endif
732 #endif // COMPILER2
733 
734 // On new cpus instructions which update whole XMM register should be used
735 // to prevent partial register stall due to dependencies on high half.
736 //
737 // UseXmmLoadAndClearUpper == true --> movsd(xmm, mem)
738 // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
739 // UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
740 // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
741 
// AMD-specific flag defaults; this block is cut off at the end of the
// visible fragment (its closing braces lie beyond this view).
742 if( is_amd() ) { // AMD cpus specific settings
743 if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
744 // Use it on new AMD cpus starting from Opteron.
745 UseAddressNop = true;
746 }
747 if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
748 // Use it on new AMD cpus starting from Opteron.
749 UseNewLongLShift = true;
|