< prev index next >

src/cpu/x86/vm/x86.ad

Print this page




10503   match(Set dst (XorV src1 src2));
10504   format %{ "vpxor   $dst,$src1,$src2\t! xor vectors (64 bytes)" %}
10505   ins_encode %{
10506     int vector_len = 2;
10507     __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
10508   %}
10509   ins_pipe( pipe_slow );
10510 %}
10511 
// vxor64B_mem: 512-bit (64-byte) vector XOR with the second operand folded
// from memory: dst = src ^ [mem]. Only selected on AVX-512 (UseAVX > 2) for
// 64-byte vectors, per the predicate.
// vector_len = 2 selects the 512-bit form (matches the 64-byte format note).
10512 instruct vxor64B_mem(vecZ dst, vecZ src, memory mem) %{
10513   predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64);
10514   match(Set dst (XorV src (LoadVector mem)));
10515   format %{ "vpxor   $dst,$src,$mem\t! xor vectors (64 bytes)" %}
10516   ins_encode %{
10517     int vector_len = 2;
10518     __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
10519   %}
10520   ins_pipe( pipe_slow );
10521 %}
10522 
































































































































































10503   match(Set dst (XorV src1 src2));
10504   format %{ "vpxor   $dst,$src1,$src2\t! xor vectors (64 bytes)" %}
10505   ins_encode %{
10506     int vector_len = 2;
10507     __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
10508   %}
10509   ins_pipe( pipe_slow );
10510 %}
10511 
// vxor64B_mem: 512-bit (64-byte) vector XOR with the second operand folded
// from memory: dst = src ^ [mem]. Only selected on AVX-512 (UseAVX > 2) for
// 64-byte vectors, per the predicate.
// vector_len = 2 selects the 512-bit form (matches the 64-byte format note).
10512 instruct vxor64B_mem(vecZ dst, vecZ src, memory mem) %{
10513   predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64);
10514   match(Set dst (XorV src (LoadVector mem)));
10515   format %{ "vpxor   $dst,$src,$mem\t! xor vectors (64 bytes)" %}
10516   ins_encode %{
10517     int vector_len = 2;
10518     __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
10519   %}
10520   ins_pipe( pipe_slow );
10521 %}
10522 
10523 // --------------------------------- FMA --------------------------------------
10524 
10525 // a * b + c
// vfma2D_reg: fused multiply-add on 2 packed doubles (128-bit): c = a * b + c.
// The accumulator c is both an input and the destination (Set c (FmaVD c ...)),
// so the update happens in place with no temp register. Guarded by UseFMA.
// vector_len = 0 -> 128-bit vector width.
10526 instruct vfma2D_reg(vecX a, vecX b, vecX c) %{
10527   predicate(UseFMA && n->as_Vector()->length() == 2);
10528   match(Set c (FmaVD  c (Binary a b)));
10529   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed2D" %}
10530   ins_cost(150);
10531   ins_encode %{
10532     int vector_len = 0;
10533     __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
10534   %}
10535   ins_pipe( pipe_slow );
10536 %}
10537 
10538 // a * b + c
// vfma2D_mem: fused multiply-add on 2 packed doubles (128-bit) with the
// multiplier b folded from memory (LoadVector): c = a * [b] + c.
// c is both input and destination; guarded by UseFMA.
// vector_len = 0 -> 128-bit vector width.
10539 instruct vfma2D_mem(vecX a, memory b, vecX c) %{
10540   predicate(UseFMA && n->as_Vector()->length() == 2);
10541   match(Set c (FmaVD  c (Binary a (LoadVector b))));
10542   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed2D" %}
10543   ins_cost(150);
10544   ins_encode %{
10545     int vector_len = 0;
10546     __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
10547   %}
10548   ins_pipe( pipe_slow );
10549 %}
10550 
10551 
10552 // a * b + c
// vfma4D_reg: fused multiply-add on 4 packed doubles (256-bit): c = a * b + c.
// c is both input and destination (in-place accumulator); guarded by UseFMA.
// vector_len = 1 -> 256-bit vector width.
10553 instruct vfma4D_reg(vecY a, vecY b, vecY c) %{
10554   predicate(UseFMA && n->as_Vector()->length() == 4);
10555   match(Set c (FmaVD  c (Binary a b)));
10556   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed4D" %}
10557   ins_cost(150);
10558   ins_encode %{
10559     int vector_len = 1;
10560     __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
10561   %}
10562   ins_pipe( pipe_slow );
10563 %}
10564 
10565 // a * b + c
// vfma4D_mem: fused multiply-add on 4 packed doubles (256-bit) with the
// multiplier b folded from memory (LoadVector): c = a * [b] + c.
// c is both input and destination; guarded by UseFMA.
// vector_len = 1 -> 256-bit vector width.
10566 instruct vfma4D_mem(vecY a, memory b, vecY c) %{
10567   predicate(UseFMA && n->as_Vector()->length() == 4);
10568   match(Set c (FmaVD  c (Binary a (LoadVector b))));
10569   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed4D" %}
10570   ins_cost(150);
10571   ins_encode %{
10572     int vector_len = 1;
10573     __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
10574   %}
10575   ins_pipe( pipe_slow );
10576 %}
10577 
10578 // a * b + c
// vfma8D_reg: fused multiply-add on 8 packed doubles (512-bit): c = a * b + c.
// c is both input and destination (in-place accumulator); guarded by UseFMA.
// vector_len = 2 -> 512-bit vector width (vecZ operands).
10579 instruct vfma8D_reg(vecZ a, vecZ b, vecZ c) %{
10580   predicate(UseFMA && n->as_Vector()->length() == 8);
10581   match(Set c (FmaVD  c (Binary a b)));
10582   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed8D" %}
10583   ins_cost(150);
10584   ins_encode %{
10585     int vector_len = 2;
10586     __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
10587   %}
10588   ins_pipe( pipe_slow );
10589 %}
10590 
10591 // a * b + c
// vfma8D_mem: fused multiply-add on 8 packed doubles (512-bit) with the
// multiplier b folded from memory (LoadVector): c = a * [b] + c.
// c is both input and destination; guarded by UseFMA.
// vector_len = 2 -> 512-bit vector width (vecZ operands).
10592 instruct vfma8D_mem(vecZ a, memory b, vecZ c) %{
10593   predicate(UseFMA && n->as_Vector()->length() == 8);
10594   match(Set c (FmaVD  c (Binary a (LoadVector b))));
10595   format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed8D" %}
10596   ins_cost(150);
10597   ins_encode %{
10598     int vector_len = 2;
10599     __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
10600   %}
10601   ins_pipe( pipe_slow );
10602 %}
10603 
10604 // a * b + c
// vfma4F_reg: fused multiply-add on 4 packed floats (128-bit): c = a * b + c.
// c is both input and destination (in-place accumulator); guarded by UseFMA.
// vector_len = 0 -> 128-bit vector width.
10605 instruct vfma4F_reg(vecX a, vecX b, vecX c) %{
10606   predicate(UseFMA && n->as_Vector()->length() == 4);
10607   match(Set c (FmaVF  c (Binary a b)));
10608   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed4F" %}
10609   ins_cost(150);
10610   ins_encode %{
10611     int vector_len = 0;
10612     __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
10613   %}
10614   ins_pipe( pipe_slow );
10615 %}
10616 
10617 // a * b + c
// vfma4F_mem: fused multiply-add on 4 packed floats (128-bit) with the
// multiplier b folded from memory (LoadVector): c = a * [b] + c.
// c is both input and destination; guarded by UseFMA.
// vector_len = 0 -> 128-bit vector width.
10618 instruct vfma4F_mem(vecX a, memory b, vecX c) %{
10619   predicate(UseFMA && n->as_Vector()->length() == 4);
10620   match(Set c (FmaVF  c (Binary a (LoadVector b))));
10621   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed4F" %}
10622   ins_cost(150);
10623   ins_encode %{
10624     int vector_len = 0;
10625     __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
10626   %}
10627   ins_pipe( pipe_slow );
10628 %}
10629 
10630 // a * b + c
// vfma8F_reg: fused multiply-add on 8 packed floats (256-bit): c = a * b + c.
// c is both input and destination (in-place accumulator); guarded by UseFMA.
// vector_len = 1 -> 256-bit vector width.
10631 instruct vfma8F_reg(vecY a, vecY b, vecY c) %{
10632   predicate(UseFMA && n->as_Vector()->length() == 8);
10633   match(Set c (FmaVF  c (Binary a b)));
10634   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed8F" %}
10635   ins_cost(150);
10636   ins_encode %{
10637     int vector_len = 1;
10638     __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
10639   %}
10640   ins_pipe( pipe_slow );
10641 %}
10642 
10643 // a * b + c
// vfma8F_mem: fused multiply-add on 8 packed floats (256-bit) with the
// multiplier b folded from memory (LoadVector): c = a * [b] + c.
// c is both input and destination; guarded by UseFMA.
// vector_len = 1 -> 256-bit vector width.
10644 instruct vfma8F_mem(vecY a, memory b, vecY c) %{
10645   predicate(UseFMA && n->as_Vector()->length() == 8);
10646   match(Set c (FmaVF  c (Binary a (LoadVector b))));
10647   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed8F" %}
10648   ins_cost(150);
10649   ins_encode %{
10650     int vector_len = 1;
10651     __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
10652   %}
10653   ins_pipe( pipe_slow );
10654 %}
10655 
10656 // a * b + c
// vfma16F_reg: fused multiply-add on 16 packed floats (512-bit): c = a * b + c.
// c is both input and destination (in-place accumulator); guarded by UseFMA.
// vector_len = 2 -> 512-bit vector width (vecZ operands).
10657 instruct vfma16F_reg(vecZ a, vecZ b, vecZ c) %{
10658   predicate(UseFMA && n->as_Vector()->length() == 16);
10659   match(Set c (FmaVF  c (Binary a b)));
10660   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed16F" %}
10661   ins_cost(150);
10662   ins_encode %{
10663     int vector_len = 2;
10664     __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
10665   %}
10666   ins_pipe( pipe_slow );
10667 %}
10668 
10669 // a * b + c
// vfma16F_mem: fused multiply-add on 16 packed floats (512-bit) with the
// multiplier b folded from memory (LoadVector): c = a * [b] + c.
// c is both input and destination; guarded by UseFMA.
// vector_len = 2 -> 512-bit vector width (vecZ operands).
10670 instruct vfma16F_mem(vecZ a, memory b, vecZ c) %{
10671   predicate(UseFMA && n->as_Vector()->length() == 16);
10672   match(Set c (FmaVF  c (Binary a (LoadVector b))));
10673   format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed16F" %}
10674   ins_cost(150);
10675   ins_encode %{
10676     int vector_len = 2;
10677     __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
10678   %}
10679   ins_pipe( pipe_slow );
10680 %}
< prev index next >