1 dnl Copyright (c) 2019, 2020, Red Hat Inc. All rights reserved.
   2 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3 dnl
   4 dnl This code is free software; you can redistribute it and/or modify it
   5 dnl under the terms of the GNU General Public License version 2 only, as
   6 dnl published by the Free Software Foundation.
   7 dnl
   8 dnl This code is distributed in the hope that it will be useful, but WITHOUT
   9 dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11 dnl version 2 for more details (a copy is included in the LICENSE file that
  12 dnl accompanied this code).
  13 dnl
  14 dnl You should have received a copy of the GNU General Public License version
  15 dnl 2 along with this work; if not, write to the Free Software Foundation,
  16 dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17 dnl
  18 dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19 dnl or visit www.oracle.com if you need additional information or have any
  20 dnl questions.
  21 dnl
  22 dnl
  23 dnl Process this file with m4 aarch64_ad.m4 to generate instructions used in
  24 dnl aarch64.ad:
  25 dnl 1. the arithmetic
  26 dnl 2. shift patterns
  27 dnl
  28 // BEGIN This section of the file is automatically generated. Do not edit --------------
  29 // This section is generated from aarch64_ad.m4
  30 dnl
  31 define(`ORL2I', `ifelse($1,I,orL2I)')
  32 dnl
dnl BASE_SHIFT_INSN(mode, op node, insn, shift node, shift type)
dnl Emits an instruct matching op(src1, shift(src2, src3)) and encoding
dnl it as one ALU instruction with a shifted register operand, e.g.
dnl AndL_reg_LShift_reg -> andr dst, src1, src2, LSL src3.
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst ($2$1 src1 ($4$1 src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
dnl The shift amount is masked to the operand width: 0x1f for I, 0x3f for L.
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl BASE_INVERTED_INSN(mode, op node, insn)
dnl Emits an instruct matching op(src1, xor(src2, -1)), i.e. op with one
dnl operand inverted, encoded with an inverting ALU form such as
dnl bic/orn/eon at shift LSL 0.
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
                         rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
    match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  ins_cost(INSN_COST);
  format %{ "$3  $dst, $src1, $src2" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}')dnl
dnl INVERTED_SHIFT_INSN(mode, op node, insn, shift node, shift type)
dnl Like BASE_INVERTED_INSN but the inverted operand is also shifted:
dnl matches op(src1, xor(shift(src2, src3), -1)). Arguments beyond the
dnl fifth that callers pass are not referenced by this body.
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
    match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
dnl The shift amount is masked to the operand width: 0x1f for I, 0x3f for L.
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl NOT_INSN(mode, insn)
dnl Bitwise not: matches Xor(src1, -1) and encodes eon/eonw of src1
dnl with the zero register.
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
  ins_cost(INSN_COST);
  format %{ "$2  $dst, $src1, zr" %}

  ins_encode %{
    __ $2(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}')dnl
 115 dnl
dnl BOTH_SHIFT_INSNS(op node, insn, shift node, shift type)
dnl Instantiates BASE_SHIFT_INSN for both I and L modes. The word form
dnl of andr is andw rather than andrw, hence the ifelse special case.
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl BOTH_INVERTED_INSNS(op node, insn)
dnl Instantiates BASE_INVERTED_INSN for I (insn suffixed with w) and L.
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl BOTH_INVERTED_SHIFT_INSNS(op node, insn)
dnl Instantiates INVERTED_SHIFT_INSN for I (insn suffixed with w) and L.
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
dnl
dnl ALL_SHIFT_KINDS(op node, insn)
dnl Instantiates BOTH_SHIFT_INSNS for the LSR, ASR and LSL shift kinds.
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
dnl ALL_INVERTED_SHIFT_KINDS(op node, insn)
dnl Instantiates BOTH_INVERTED_SHIFT_INSNS for LSR, ASR and LSL.
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
 137 dnl
dnl Instantiate the not, inverted and shifted-operand arithmetic
dnl instructs for every relevant operation at both operand widths.
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
 151 dnl
dnl EXTEND(mode, rshift node, src, lshift count, rshift count)
dnl Expands to the match subtree rshift(lshift(src, m), n), the
dnl shift-left-then-right idiom the compiler uses for extensions.
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
dnl BFM_INSN(mode, width-1, rshift node, insn)
dnl Matches rshift(lshift(src, L), R) and encodes a single bitfield
dnl move (sbfm/ubfm), with the r and s fields computed from the two
dnl shift counts.
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
  match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "$4  $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & $2;
    int rshift = $rshift_count$$constant & $2;
    int s = $2 - lshift;
    int r = (rshift - lshift) & $2;
    __ $4(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}')
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
 178 dnl
 179 // Bitfield extract with shift & mask
dnl BFX_INSN(mode, rshift node, insn, width-1, getter type, exact_log2 suffix)
dnl Matches and(rshift(src, rshift), mask) and encodes one unsigned
dnl bitfield extract; the predicate rejects fields that would run past
dnl the register width.
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));
  // Make sure we are not going to exceed what $3 can do.
  predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & $4;
    long mask = $mask$$constant;
    int width = exact_log2$6(mask+1);
    __ $3(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
BFX_INSN(I, URShift, ubfxw, 31, int)
BFX_INSN(L, URShift, ubfx,  63, long, _long)
 200 
 201 // We can use ubfx when extending an And with a mask when we know mask
 202 // is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Field width is the bit count of the mask; the unsigned extract
    // zero-extends, which also covers the ConvI2L in the match rule.
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
 220 
dnl UBFIZ_INSN(mode, insn, width-1, getter type, exact_log2 suffix)
dnl Matches lshift(and(src, mask), lshift) and encodes one unsigned
dnl bitfield insert-in-zero instruction.
define(`UBFIZ_INSN',
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because imm$1_bitmask guarantees it.
`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
%{
  match(Set dst (LShift$1 (And$1 src mask) lshift));
  predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));

  ins_cost(INSN_COST);
  format %{ "$2 $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & $3;
    long mask = $mask$$constant;
    int width = exact_log2$5(mask+1);
    __ $2(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')dnl
UBFIZ_INSN(I, ubfizw, 31, int)
UBFIZ_INSN(L, ubfiz,  63, long, _long)
 242 
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  // Mask width plus shift amount must fit in the 64-bit register.
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
 260 
 261 // Rotations
 262 
 263 define(`EXTRACT_INSN',
 264 `instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
 265 %{
 266   match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
 267   predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));
 268 
 269   ins_cost(INSN_COST);
 270   format %{ "extr $dst, $src1, $src2, #$rshift" %}
 271 
 272   ins_encode %{
 273     __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
 274             $rshift$$constant & $2);
 275   %}
 276   ins_pipe(ialu_reg_reg_extr);
 277 %}
 278 ')dnl
 279 EXTRACT_INSN(L, 63, Or, extr)
 280 EXTRACT_INSN(I, 31, Or, extrw)
 281 EXTRACT_INSN(L, 63, Add, extr)
 282 EXTRACT_INSN(I, 31, Add, extrw)
dnl ROL_EXPAND(mode, name, insn)
dnl Expander used by the rol match rules: rotate left by a variable
dnl amount is encoded as rotate right by the negated amount.
define(`ROL_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
dnl Negate the shift count, then rotate right by it.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl ROR_EXPAND(mode, name, insn)
dnl Expander used by the ror match rules: maps directly onto the
dnl variable rotate right instruction rorv/rorvw.
define(`ROR_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
 314 define(ROL_INSN, `
 315 instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 316 %{
 317   match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
 318 
 319   expand %{
 320     $3$1_rReg(dst, src, shift, cr);
 321   %}
 322 %}')dnl
 323 define(ROR_INSN, `
 324 instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 325 %{
 326   match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
 327 
 328   expand %{
 329     $3$1_rReg(dst, src, shift, cr);
 330   %}
 331 %}')dnl
dnl Instantiate the rotate expanders and match rules for both widths;
dnl the C constant in the match rule is the register size or zero.
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
ROL_INSN(L, 0, rol)
ROL_INSN(I, _32, rol)
ROL_INSN(I, 0, rol)
ROR_EXPAND(L, ror, rorv)
ROR_EXPAND(I, ror, rorvw)
ROR_INSN(L, _64, ror)
ROR_INSN(L, 0, ror)
ROR_INSN(I, _32, ror)
ROR_INSN(I, 0, ror)
 344 
 345 // Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize)
dnl ADD_SUB_CONV(src mode, dst mode, add node, insn, ext type)
dnl Matches add/sub with a ConvI2L operand and encodes add/sub with a
dnl sign-extended (sxtw) register operand.
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')dnl
ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
 362 dnl
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, ext type, wordsize)
dnl Matches add/sub of a sign/zero extension written as the EXTEND
dnl shift idiom and encodes add/sub with an extended register operand.
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
  ins_cost(INSN_COST);
  format %{ "$5  $dst, $src1, $src2, $6" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6);
   %}
  ins_pipe(ialu_reg_reg);
%}')
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
 383 dnl
dnl ADD_SUB_ZERO_EXTEND(mode, mask, add node, insn, ext type)
dnl Matches add/sub of And(src2, mask) for the mask values 255, 65535
dnl and 4294967295 and encodes add/sub with a uxtb/uxth/uxtw operand.
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
  ins_cost(INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
 410 dnl
dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, wordsize)
dnl As ADD_SUB_EXTENDED but the extended value is also left-shifted;
dnl encodes add/sub with an extended register operand plus shift.
define(`ADD_SUB_EXTENDED_SHIFT', `
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$5  $dst, $src1, $src2, $6 #lshift2" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}')
dnl                   $1 $2 $3   $4   $5   $6  $7
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
 439 dnl
 440 dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
dnl Matches add/sub of a left-shifted ConvI2L and encodes add/sub with
dnl a sign-extended (sxtw) register operand plus shift amount.
define(`ADD_SUB_CONV_SHIFT', `
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $4 #lshift" %}

   ins_encode %{
     __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$4, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}')
dnl
ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
 457 dnl
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, mask, add node, insn, ext type)
dnl Matches add/sub of a left-shifted zero-extension (And with an
dnl 8/16/32-bit mask) and encodes add/sub with an extended register
dnl operand plus shift amount.
define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5 #lshift" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}')
dnl
dnl                       $1 $2  $3  $4  $5
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
 487 dnl
 488 // END This section of the file is automatically generated. Do not edit --------------