src/cpu/x86/vm/x86.ad

Print this page
rev 10065 : 8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot intrinsic
Summary: adds c1 & c2 x86 intrinsics for j.l.Runtime.onSpinWait() that utilize the 'pause' instruction
Contributed-by: ikrylov, ygaevsky
Reviewed-by: iveresov, vlivanov, kvn


1702       break;
1703     case Op_CompareAndSwapL:
1704 #ifdef _LP64
1705     case Op_CompareAndSwapP:
1706 #endif
1707       if (!VM_Version::supports_cx8())
1708         ret_value = false;
1709       break;
1710     case Op_CMoveVD:
1711       if (UseAVX < 1 || UseAVX > 2)
1712         ret_value = false;
1713       break;
1714     case Op_StrIndexOf:
1715       if (!UseSSE42Intrinsics)
1716         ret_value = false;
1717       break;
1718     case Op_StrIndexOfChar:
1719       if (!(UseSSE > 4))
1720         ret_value = false;
1721       break;




1722   }
1723 
1724   return ret_value;  // Per default match rules are supported.
1725 }
1726 
1727 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
1728   // identify extra cases that we might want to provide match rules for
1729   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
1730   bool ret_value = match_rule_supported(opcode);
1731   if (ret_value) {
1732     switch (opcode) {
1733       case Op_AddVB:
1734       case Op_SubVB:
1735         if ((vlen == 64) && (VM_Version::supports_avx512bw() == false))
1736           ret_value = false;
1737         break;
1738       case Op_URShiftVS:
1739       case Op_RShiftVS:
1740       case Op_LShiftVS:
1741       case Op_MulVS:


2979 
2980   format %{ "sqrtsd  $dst, $src" %}
2981   ins_cost(150);
2982   ins_encode %{
2983     __ sqrtsd($dst$$XMMRegister, $src$$Address);
2984   %}
2985   ins_pipe(pipe_slow);
2986 %}
2987 
// Square root of a double-precision constant: the constant is materialized
// in the constant table and sqrtsd reads it directly from memory.
// Requires SSE2 (sqrtsd is an SSE2 instruction).
2988 instruct sqrtD_imm(regD dst, immD con) %{
2989   predicate(UseSSE>=2);
2990   match(Set dst (SqrtD con));
2991   format %{ "sqrtsd  $dst, [$constantaddress]\t# load from constant table: double=$con" %}
2992   ins_cost(150);
2993   ins_encode %{
2994     __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
2995   %}
2996   ins_pipe(pipe_slow);
2997 %}
2998 


















2999 // ====================VECTOR INSTRUCTIONS=====================================
3000 
3001 // Load vectors (4 bytes long)
// Matches a LoadVector node whose total memory footprint is exactly 4 bytes
// (e.g. 4 x byte or 2 x short); movdl zero-extends the 4-byte load into
// the destination XMM register.
3002 instruct loadV4(vecS dst, memory mem) %{
3003   predicate(n->as_LoadVector()->memory_size() == 4);
3004   match(Set dst (LoadVector mem));
3005   ins_cost(125);
3006   format %{ "movd    $dst,$mem\t! load vector (4 bytes)" %}
3007   ins_encode %{
3008     __ movdl($dst$$XMMRegister, $mem$$Address);
3009   %}
3010   ins_pipe( pipe_slow );
3011 %}
3012 
3013 // Load vectors (8 bytes long)
3014 instruct loadV8(vecD dst, memory mem) %{
3015   predicate(n->as_LoadVector()->memory_size() == 8);
3016   match(Set dst (LoadVector mem));
3017   ins_cost(125);
3018   format %{ "movq    $dst,$mem\t! load vector (8 bytes)" %}




1702       break;
1703     case Op_CompareAndSwapL:
1704 #ifdef _LP64
1705     case Op_CompareAndSwapP:
1706 #endif
1707       if (!VM_Version::supports_cx8())
1708         ret_value = false;
1709       break;
1710     case Op_CMoveVD:
1711       if (UseAVX < 1 || UseAVX > 2)
1712         ret_value = false;
1713       break;
1714     case Op_StrIndexOf:
1715       if (!UseSSE42Intrinsics)
1716         ret_value = false;
1717       break;
1718     case Op_StrIndexOfChar:
1719       if (!(UseSSE > 4))
1720         ret_value = false;
1721       break;
1722     case Op_OnSpinWait:
1723       if (VM_Version::supports_on_spin_wait() == false)
1724         ret_value = false;
1725       break;
1726   }
1727 
1728   return ret_value;  // Per default match rules are supported.
1729 }
1730 
1731 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
1732   // identify extra cases that we might want to provide match rules for
1733   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
1734   bool ret_value = match_rule_supported(opcode);
1735   if (ret_value) {
1736     switch (opcode) {
1737       case Op_AddVB:
1738       case Op_SubVB:
1739         if ((vlen == 64) && (VM_Version::supports_avx512bw() == false))
1740           ret_value = false;
1741         break;
1742       case Op_URShiftVS:
1743       case Op_RShiftVS:
1744       case Op_LShiftVS:
1745       case Op_MulVS:


2983 
2984   format %{ "sqrtsd  $dst, $src" %}
2985   ins_cost(150);
2986   ins_encode %{
2987     __ sqrtsd($dst$$XMMRegister, $src$$Address);
2988   %}
2989   ins_pipe(pipe_slow);
2990 %}
2991 
// Square root of a double-precision constant: the constant is materialized
// in the constant table and sqrtsd reads it directly from memory.
// Requires SSE2 (sqrtsd is an SSE2 instruction).
2992 instruct sqrtD_imm(regD dst, immD con) %{
2993   predicate(UseSSE>=2);
2994   match(Set dst (SqrtD con));
2995   format %{ "sqrtsd  $dst, [$constantaddress]\t# load from constant table: double=$con" %}
2996   ins_cost(150);
2997   ins_encode %{
2998     __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
2999   %}
3000   ins_pipe(pipe_slow);
3001 %}
3002 
// Matcher rule for Op_OnSpinWait, the C2 node generated for the
// j.l.Runtime.onSpinWait() intrinsic (JDK-8147844). Emits the x86 'pause'
// instruction as a spin-wait hint.
// NOTE(review): the format block prints a different mnemonic for MP vs.
// non-MP, but ins_encode emits 'pause' unconditionally regardless of
// os::is_MP() — confirm this asymmetry between the printed form and the
// actual encoding is intentional.
3003 instruct onspinwait() %{
3004   match(OnSpinWait);
3005   ins_cost(200);
3006
3007   format %{
3008     $$template
3009     if (os::is_MP()) {
3010       $$emit$$"pause\t! membar_onspinwait"
3011     } else {
3012       $$emit$$"MEMBAR-onspinwait ! (empty encoding)"
3013     }
3014   %}
3015   ins_encode %{
3016     __ pause();
3017   %}
3018   ins_pipe(pipe_slow);
3019 %}
3020 
3021 // ====================VECTOR INSTRUCTIONS=====================================
3022 
3023 // Load vectors (4 bytes long)
// Matches a LoadVector node whose total memory footprint is exactly 4 bytes
// (e.g. 4 x byte or 2 x short); movdl zero-extends the 4-byte load into
// the destination XMM register.
3024 instruct loadV4(vecS dst, memory mem) %{
3025   predicate(n->as_LoadVector()->memory_size() == 4);
3026   match(Set dst (LoadVector mem));
3027   ins_cost(125);
3028   format %{ "movd    $dst,$mem\t! load vector (4 bytes)" %}
3029   ins_encode %{
3030     __ movdl($dst$$XMMRegister, $mem$$Address);
3031   %}
3032   ins_pipe( pipe_slow );
3033 %}
3034 
3035 // Load vectors (8 bytes long)
3036 instruct loadV8(vecD dst, memory mem) %{
3037   predicate(n->as_LoadVector()->memory_size() == 8);
3038   match(Set dst (LoadVector mem));
3039   ins_cost(125);
3040   format %{ "movq    $dst,$mem\t! load vector (8 bytes)" %}