dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
dnl and shift patterns used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
dnl
dnl ORL2I(mode) -- expands to the register-class suffix `orL2I' when the
dnl mode argument is I, and to nothing for L.  iReg$1`'ORL2I($1) therefore
dnl yields iRegIorL2I for int patterns and iRegL for long patterns
dnl (presumably letting int operands also match an L2I conversion).
define(`ORL2I', `ifelse($1,I,orL2I)')
dnl
dnl BASE_SHIFT_INSN(mode, op-node, insn, shift-node, shift-type)
dnl Emits one instruct rule matching op(src1, shift(src2, src3)) and
dnl encoding it as a single shifted-register instruction.  The immediate
dnl shift amount is masked to 0x1f for I and 0x3f for L.
dnl NOTE(review): rFlagsReg cr is listed as an operand but never used in
dnl the encoding and has no effect() clause -- confirm it is intentional.
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst ($2$1 src1 ($4$1 src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
          as_Register($src1$$reg),
          as_Register($src2$$reg),
          Assembler::$5,
          $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl
dnl BASE_INVERTED_INSN(mode, op-node, insn, ...)
dnl Emits a rule matching op(src1, xor(src2, -1)) -- i.e. an operation
dnl combined with a bitwise-inverted operand -- encoded as a single
dnl instruction (bic/orn/eon) with an LSL #0 shifted-register form.
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
                         rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
  match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
  match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src1, $src2" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
          as_Register($src1$$reg),
          as_Register($src2$$reg),
          Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}')dnl
dnl
dnl INVERTED_SHIFT_INSN(mode, op-node, insn, shift-node, shift-type, ...)
dnl As BASE_INVERTED_INSN, but the inverted operand is itself shifted:
dnl matches op(src1, xor(shift(src2, src3), -1)) as one instruction.
dnl The trailing ~0/int (resp. ~0l/long) arguments passed by
dnl BOTH_INVERTED_SHIFT_INSNS below appear unused here ($6/$7 never
dnl referenced) -- presumably historical; verify before relying on them.
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
  match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
  match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
          as_Register($src1$$reg),
          as_Register($src2$$reg),
          Assembler::$5,
          $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl
dnl NOT_INSN(mode, insn)
dnl Emits a rule matching xor(src1, -1) (bitwise NOT) encoded as
dnl insn dst, src1, zr with LSL #0.
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
  ins_cost(INSN_COST);
  format %{ "$2 $dst, $src1, zr" %}

  ins_encode %{
    __ $2(as_Register($dst$$reg),
          as_Register($src1$$reg),
          zr,
          Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}')dnl
dnl
dnl BOTH_SHIFT_INSNS -- emits the I and L variants of one shifted-register
dnl rule.  The I variant appends `w' to the instruction name, except that
dnl `andr' maps to `andw' (andr is the macro-assembler alias for and).
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl BOTH_INVERTED_INSNS -- I and L variants of one inverted-operand rule.
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl BOTH_INVERTED_SHIFT_INSNS -- I and L variants of one
dnl inverted-shifted-operand rule.
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
dnl
dnl ALL_SHIFT_KINDS -- one rule per shift kind (LSR/ASR/LSL), each in
dnl I and L variants.
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
dnl ALL_INVERTED_SHIFT_KINDS -- inverted-operand counterpart of
dnl ALL_SHIFT_KINDS.
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
dnl Instantiate the logical-operation patterns.
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
dnl Expands to the ideal subtree for a left-shift followed by a
dnl right-shift: (rshift_op (LShift src lshift_count) rshift_count).
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
dnl
dnl BFM_INSN(mode, width-1, rshift-node, insn)
dnl Matches the shift-left-then-shift-right idiom and encodes it as a
dnl single bitfield-move instruction (sbfm/ubfm).  The predicate rejects
dnl shift counts larger than the operand width.
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
  match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
  // Make sure we are not going to exceed what $4 can do.
  predicate((unsigned int)n->in(2)->get_int() <= $2
            && (unsigned int)n->in(1)->in(2)->get_int() <= $2);

  ins_cost(INSN_COST * 2);
  format %{ "$4 $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = $2 - lshift;
    int r = (rshift - lshift) & $2;
    __ $4(as_Register($dst$$reg),
          as_Register($src$$reg),
          r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}')
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
dnl
// Bitfield extract with shift & mask
dnl BFX_INSN(mode, rshift-node, insn, width-1, getter-suffix, log2-suffix)
dnl Matches (And (URShift src rshift) mask) for a power-of-two-minus-one
dnl mask and encodes it as a ubfx/ubfxw bitfield extract.  The predicate
dnl checks that field width + shift fits in the operand width.
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));
  // Make sure we are not going to exceed what $3 can do.
  predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & $4;
    long mask = $mask$$constant;
    int width = exact_log2$6(mask+1);
    __ $3(as_Register($dst$$reg),
          as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
BFX_INSN(I, URShift, ubfxw, 31, int)
BFX_INSN(L, URShift, ubfx, 63, long, _long)

// We can use ubfx when extending an And with a mask when we know mask
// is positive. We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Rotations

dnl EXTRACT_INSN(mode, width-1, op-node, insn)
dnl Matches (op (LShift src1 lshift) (URShift src2 rshift)) where the two
dnl shift counts sum to the operand width (checked by the predicate), and
dnl encodes it as a single extr/extrw instruction.
define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & $2));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
          $rshift$$constant & $2);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
')dnl
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
dnl
dnl ROL_EXPAND(mode, name, insn)
dnl Emits an expander-only instruct (no match rule) implementing
dnl rotate-left as a rotate-right-variable by the negated shift count:
dnl rscratch1 = 0 - shift, then insn dst, src, rscratch1.
define(`ROL_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2 $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
          rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl
dnl ROR_EXPAND(mode, name, insn)
dnl Expander-only instruct for rotate-right: a direct
dnl rotate-right-variable instruction, no shift negation needed.
define(`ROR_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2 $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
          as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl
dnl ROL_INSN(mode, imm-suffix, name)
dnl Match rule for the rotate-left idiom
dnl (Or (LShift src shift) (URShift src (SubI c shift))), expanding into
dnl the ROL_EXPAND instruct above.
dnl NOTE(review): the macro name is unquoted here, unlike every other
dnl define in this file (`NAME') -- works, but confirm it is intentional.
define(ROL_INSN, `
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
dnl
dnl ROR_INSN(mode, imm-suffix, name) -- mirror image of ROL_INSN for the
dnl rotate-right idiom.  Same unquoted-name note applies.
define(ROR_INSN, `
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
dnl Instantiate the rotate patterns; the C_64/C_32 and C0 immI variants
dnl cover both spellings of the complement constant.
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
ROL_INSN(L, 0, rol)
ROL_INSN(I, _32, rol)
ROL_INSN(I, 0, rol)
ROR_EXPAND(L, ror, rorv)
ROR_EXPAND(I, ror, rorvw)
ROR_INSN(L, _64, ror)
ROR_INSN(L, 0, ror)
ROR_INSN(I, _32, ror)
ROR_INSN(I, 0, ror)

// Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize)
dnl
dnl ADD_SUB_CONV(src-mode, dst-mode, op-node, insn, extend-kind)
dnl Matches op(src1, ConvI2L(src2)) and encodes it as an add/sub with a
dnl sign-extend-word (sxtw) extended-register operand.
dnl NOTE(review): the invocations below carry a trailing `;' that is
dnl emitted verbatim into the generated file -- confirm it is harmless.
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "$4 $dst, $src1, $5 $src2" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')dnl
ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
dnl
dnl ADD_SUB_EXTENDED -- matches op(src1, EXTEND(src2)) where EXTEND is the
dnl shift-left-by-(wordsize-size) / shift-right-back idiom, and encodes it
dnl as an add/sub with an sxtb/sxth/sxtw/uxtb extended-register operand.
dnl The eval($7-$2) immediates pin the shift counts to wordsize-size.
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
  ins_cost(INSN_COST);
  format %{ "$5 $dst, $src1, $6 $src2" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6);
   %}
  ins_pipe(ialu_reg_reg);
%}')
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
dnl Matches op(src1, And(src2, mask)) for mask = 255/65535/4294967295 and
dnl encodes it as an add/sub with a zero-extend (uxtb/uxth/uxtw)
dnl extended-register operand.
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
  ins_cost(INSN_COST);
  format %{ "$4 $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
dnl Subtract variants of the zero-extend patterns.
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)

// END This section of the file is automatically generated. Do not edit --------------