< prev index next >

src/hotspot/cpu/aarch64/aarch64_ad.m4

Print this page
rev 61244 : Refactor vector operations in aarch64 backend
Summary: move all vector operations which are not in jdk master to
aarch64_neon_ad.m4 and place generated instructions to the end of aarch64.ad.
This change is to minimize conflicts when merging the Vector API into the jdk
master branch. In reduction operations, identify scalar/vector inputs as isrc/
vsrc to make the code clearer. The jdk master branch also uses this naming style.
   1 dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
   2 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3 dnl
   4 dnl This code is free software; you can redistribute it and/or modify it
   5 dnl under the terms of the GNU General Public License version 2 only, as
   6 dnl published by the Free Software Foundation.
   7 dnl
   8 dnl This code is distributed in the hope that it will be useful, but WITHOUT
   9 dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11 dnl version 2 for more details (a copy is included in the LICENSE file that
  12 dnl accompanied this code).
  13 dnl
  14 dnl You should have received a copy of the GNU General Public License version
  15 dnl 2 along with this work; if not, write to the Free Software Foundation,
  16 dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17 dnl
  18 dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19 dnl or visit www.oracle.com if you need additional information or have any
  20 dnl questions.
  21 dnl
  22 dnl 
  23 dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
  24 dnl and shift patterns used in aarch64.ad.


  25 dnl
  26 // BEGIN This section of the file is automatically generated. Do not edit --------------

  27 dnl
  28 define(`ORL2I', `ifelse($1,I,orL2I)')
  29 dnl
  30 define(`BASE_SHIFT_INSN',
  31 `
  32 instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
  33                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
  34                          immI src3, rFlagsReg cr) %{
  35   match(Set dst ($2$1 src1 ($4$1 src2 src3)));
  36 
  37   ins_cost(1.9 * INSN_COST);
  38   format %{ "$3  $dst, $src1, $src2, $5 $src3" %}
  39 
  40   ins_encode %{
  41     __ $3(as_Register($dst$$reg),
  42               as_Register($src1$$reg),
  43               as_Register($src2$$reg),
  44               Assembler::$5,
  45               $src3$$constant & ifelse($1,I,0x1f,0x3f));
  46   %}


 216 %}
 217 
 218 define(`UBFIZ_INSN',
 219 // We can use ubfiz when masking by a positive number and then left shifting the result.
 220 // We know that the mask is positive because imm$1_bitmask guarantees it.
 221 `instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
 222 %{
 223   match(Set dst (LShift$1 (And$1 src mask) lshift));
 224   predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));
 225 
 226   ins_cost(INSN_COST);
 227   format %{ "$2 $dst, $src, $lshift, $mask" %}
 228   ins_encode %{
 229     int lshift = $lshift$$constant & $3;
 230     long mask = $mask$$constant;
 231     int width = exact_log2$5(mask+1);
 232     __ $2(as_Register($dst$$reg),
 233           as_Register($src$$reg), lshift, width);
 234   %}
 235   ins_pipe(ialu_reg_shift);
 236 %}')
 237 UBFIZ_INSN(I, ubfizw, 31, int)
 238 UBFIZ_INSN(L, ubfiz,  63, long, _long)
 239 
 240 // If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
 241 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 242 %{
 243   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
 244   // Applies only when the extracted field (mask width + shift) fits in the 64-bit result.
 245   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
 246 
 247   ins_cost(INSN_COST);
 248   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
 249   ins_encode %{
 250     int lshift = $lshift$$constant & 63;
 251     long mask = $mask$$constant;
 252     // mask is guaranteed positive by immI_bitmask, so mask+1 is an exact power of two.
 253     int width = exact_log2(mask+1);
 254     __ ubfiz(as_Register($dst$$reg),
 255              as_Register($src$$reg), lshift, width);
 256   %}
 257   ins_pipe(ialu_reg_shift);
 258 %}


   1 dnl Copyright (c) 2019, 2020, Red Hat Inc. All rights reserved.
   2 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3 dnl
   4 dnl This code is free software; you can redistribute it and/or modify it
   5 dnl under the terms of the GNU General Public License version 2 only, as
   6 dnl published by the Free Software Foundation.
   7 dnl
   8 dnl This code is distributed in the hope that it will be useful, but WITHOUT
   9 dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11 dnl version 2 for more details (a copy is included in the LICENSE file that
  12 dnl accompanied this code).
  13 dnl
  14 dnl You should have received a copy of the GNU General Public License version
  15 dnl 2 along with this work; if not, write to the Free Software Foundation,
  16 dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17 dnl
  18 dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19 dnl or visit www.oracle.com if you need additional information or have any
  20 dnl questions.
  21 dnl
  22 dnl
  23 dnl Process this file with m4 aarch64_ad.m4 to generate instructions used in
  24 dnl aarch64.ad:
  25 dnl 1. the arithmetic
  26 dnl 2. shift patterns
  27 dnl
  28 // BEGIN This section of the file is automatically generated. Do not edit --------------
  29 // This section is generated from aarch64_ad.m4
  30 dnl
  31 define(`ORL2I', `ifelse($1,I,orL2I)')
  32 dnl
  33 define(`BASE_SHIFT_INSN',
  34 `
  35 instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
  36                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
  37                          immI src3, rFlagsReg cr) %{
  38   match(Set dst ($2$1 src1 ($4$1 src2 src3)));
  39 
  40   ins_cost(1.9 * INSN_COST);
  41   format %{ "$3  $dst, $src1, $src2, $5 $src3" %}
  42 
  43   ins_encode %{
  44     __ $3(as_Register($dst$$reg),
  45               as_Register($src1$$reg),
  46               as_Register($src2$$reg),
  47               Assembler::$5,
  48               $src3$$constant & ifelse($1,I,0x1f,0x3f));
  49   %}


 219 %}
 220 
dnl UBFIZ_INSN(TYPE, insn, maskbits, javatype[, log2suffix])
dnl   $1 - AD type suffix (I or L) used to select operand/node types
dnl   $2 - assembler mnemonic (ubfizw / ubfiz)
dnl   $3 - shift-amount mask: 31 for 32-bit, 63 for 64-bit
dnl   $4 - C++ getter suffix for the constant node (int / long)
dnl   $5 - optional suffix selecting exact_log2 vs exact_log2_long
 221 define(`UBFIZ_INSN',
 222 // We can use ubfiz when masking by a positive number and then left shifting the result.
 223 // We know that the mask is positive because imm$1_bitmask guarantees it.
 224 `instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
 225 %{
 226   match(Set dst (LShift$1 (And$1 src mask) lshift));
 227   predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));
 228 
 229   ins_cost(INSN_COST);
 230   format %{ "$2 $dst, $src, $lshift, $mask" %}
 231   ins_encode %{
 232     int lshift = $lshift$$constant & $3;
 233     long mask = $mask$$constant;
 234     int width = exact_log2$5(mask+1);
 235     __ $2(as_Register($dst$$reg),
 236           as_Register($src$$reg), lshift, width);
 237   %}
 238   ins_pipe(ialu_reg_shift);
 239 %}')dnl
 240 UBFIZ_INSN(I, ubfizw, 31, int)
 241 UBFIZ_INSN(L, ubfiz,  63, long, _long)
 242 
 243 // If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
 244 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 245 %{
 246   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
 247   // Applies only when the extracted field (mask width + shift) fits in the 64-bit result.
 248   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
 249 
 250   ins_cost(INSN_COST);
 251   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
 252   ins_encode %{
 253     int lshift = $lshift$$constant & 63;
 254     long mask = $mask$$constant;
 255     // mask is guaranteed positive by immI_bitmask, so mask+1 is an exact power of two.
 256     int width = exact_log2(mask+1);
 257     __ ubfiz(as_Register($dst$$reg),
 258              as_Register($src$$reg), lshift, width);
 259   %}
 260   ins_pipe(ialu_reg_shift);
 261 %}


< prev index next >