
src/cpu/x86/vm/x86.ad

rev 9944 : 8145336: PPC64: fix string intrinsics after CompactStrings change


@@ -1694,40 +1694,48 @@
     case Op_MulReductionVF:
     case Op_MulReductionVD:
       if (UseSSE < 1) // requires at least SSE
         ret_value = false;
       break;
     case Op_SqrtVD:
       if (UseAVX < 1) // enabled for AVX only
         ret_value = false;
       break;
     case Op_CompareAndSwapL:
 #ifdef _LP64
     case Op_CompareAndSwapP:
 #endif
       if (!VM_Version::supports_cx8())
         ret_value = false;
       break;
     case Op_CMoveVD:
       if (UseAVX < 1 || UseAVX > 2)
         ret_value = false;
       break;
+    case Op_StrIndexOf:
+      if (!UseSSE42Intrinsics)
+        ret_value = false;
+      break;
+    case Op_StrIndexOfChar:
+      if (!(UseSSE > 4))
+        ret_value = false;
+      break;
   }
 
   return ret_value;  // Per default match rules are supported.
 }
 
 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
   // identify extra cases that we might want to provide match rules for
   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
   bool ret_value = match_rule_supported(opcode);
   if (ret_value) {
     switch (opcode) {
       case Op_AddVB:
       case Op_SubVB:
         if ((vlen == 64) && (VM_Version::supports_avx512bw() == false))
           ret_value = false;
         break;
       case Op_URShiftVS:
       case Op_RShiftVS:
       case Op_LShiftVS:
       case Op_MulVS:
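For context, the eight added lines follow the same guard pattern the rest of the switch already uses: each opcode that depends on a CPU feature sets ret_value to false when that feature is absent, so C2 never selects a machine rule the hardware cannot execute. Below is a minimal standalone sketch of that gating pattern. The Opcode enum and the three feature values are stand-ins invented for illustration; in HotSpot the real state comes from the UseSSE/UseAVX/UseSSE42Intrinsics globals and VM_Version.

// Standalone sketch of the capability-gating pattern shown in the diff.
// The enum and the three "feature" values below are illustrative
// stand-ins, not HotSpot's real opcodes or globals.
#include <cstdio>

enum Opcode { Op_StrIndexOf, Op_StrIndexOfChar, Op_SqrtVD };

static const int  UseSSE             = 4;     // highest SSE level assumed on
static const int  UseAVX             = 2;     // highest AVX level assumed on
static const bool UseSSE42Intrinsics = true;  // SSE4.2 string instructions

static bool match_rule_supported(int opcode) {
  bool ret_value = true;  // per default, match rules are supported
  switch (opcode) {
    case Op_StrIndexOf:        // string search relies on SSE4.2
      if (!UseSSE42Intrinsics)
        ret_value = false;
      break;
    case Op_StrIndexOfChar:    // same guard shape as in the patch above
      if (!(UseSSE > 4))
        ret_value = false;
      break;
    case Op_SqrtVD:            // vector sqrt is enabled for AVX only
      if (UseAVX < 1)
        ret_value = false;
      break;
  }
  return ret_value;            // opcodes not listed stay supported
}

int main() {
  printf("Op_StrIndexOf supported:     %d\n", match_rule_supported(Op_StrIndexOf));
  printf("Op_StrIndexOfChar supported: %d\n", match_rule_supported(Op_StrIndexOfChar));
}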

