1 dnl Copyright (c) 2014, Red Hat Inc. All rights reserved. 2 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 dnl 4 dnl This code is free software; you can redistribute it and/or modify it 5 dnl under the terms of the GNU General Public License version 2 only, as 6 dnl published by the Free Software Foundation. 7 dnl 8 dnl This code is distributed in the hope that it will be useful, but WITHOUT 9 dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 dnl version 2 for more details (a copy is included in the LICENSE file that 12 dnl accompanied this code). 13 dnl 14 dnl You should have received a copy of the GNU General Public License version 15 dnl 2 along with this work; if not, write to the Free Software Foundation, 16 dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 dnl 18 dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 dnl or visit www.oracle.com if you need additional information or have any 20 dnl questions. 21 dnl 22 dnl 23 dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic 24 dnl and shift patterns patterns used in aarch64.ad. 25 dnl 26 // BEGIN This section of the file is automatically generated. 
Do not edit --------------

dnl
dnl BASE_SHIFT_INSN(mode, op, insn, shift, kind)
dnl   mode  -- ideal type: I (32-bit) or L (64-bit)
dnl   op    -- ideal node folded with the shift (And/Or/Xor/Add/Sub)
dnl   insn  -- assembler mnemonic to emit
dnl   shift -- ideal shift node (URShift/RShift/LShift)
dnl   kind  -- Assembler shift kind (LSR/ASR/LSL)
dnl Matches "op src1 (shift src2 #imm)" and emits the shifted-register
dnl form of the instruction.
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1 src1, iReg$1 src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst ($2$1 src1 ($4$1 src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
dnl Mask the shift count to the operand width: for the 32-bit (I) forms
dnl an imm6 value >= 32 is an unallocated encoding, so masking with 0x3f
dnl could emit an invalid instruction when the ideal shift count exceeds
dnl 31.  Use 0x1f for I, 0x3f for L.
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl
dnl BASE_INVERTED_INSN(mode, op, insn, ...)
dnl Matches "op src1 (Xor src2 -1)" -- op with one inverted operand --
dnl and emits the inverted-operand instruction (bic/orn/eon forms).
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                         iReg$1 src1, iReg$1 src2, imm$1_M1 m1,
                         rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
  match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
  match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src1, $src2" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}')dnl
dnl
dnl INVERTED_SHIFT_INSN(mode, op, insn, shift, kind, ...)
dnl As BASE_INVERTED_INSN but the inverted operand is itself a constant
dnl shift, folded into the shifted-register form.  (Arguments $6/$7 are
dnl supplied by BOTH_INVERTED_SHIFT_INSNS but unused here.)
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                         iReg$1 src1, iReg$1 src2,
                         immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
  match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
  match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
  ins_cost(1.9 * INSN_COST);
  format %{ "$3 $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
dnl Same width-dependent mask as in BASE_SHIFT_INSN: 0x1f for the
dnl 32-bit forms, 0x3f for the 64-bit forms.
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}')dnl
dnl
dnl NOT_INSN(mode, insn): bitwise NOT expressed as "Xor src -1", emitted
dnl as "eon dst, src1, zr".
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
                         iReg$1 src1, imm$1_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
  ins_cost(INSN_COST);
  format %{ "$2 $dst, $src1, zr" %}

  ins_encode %{
    __ $2(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}')dnl
dnl
dnl Emit both widths of a shifted-register rule.  The 32-bit mnemonic is
dnl the w-suffixed form; "andr" is special-cased to "andw" because the
dnl 64-bit form is named andr (plain "and" is a C++ keyword).
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
dnl Emit both widths of an inverted-operand rule.  The 32-bit rules must
dnl use the w-suffixed mnemonic (bicw/ornw/eonw), consistent with
dnl BOTH_SHIFT_INSNS, BOTH_INVERTED_SHIFT_INSNS and NOT_INSN(I, eonw);
dnl the original passed the 64-bit mnemonic for the I variants.
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
dnl
dnl ALL_SHIFT_KINDS(op, insn): shifted-register rules for each of
dnl LSR/ASR/LSL, in both widths.
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
dnl Add/Sub also get the full set of shifted-register forms.
ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
dnl Expands to the ideal subtree "(src << lshift_count) rshift_op
dnl rshift_count", the canonical shift-pair extension idiom.
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
dnl
dnl BFM_INSN(mode, width-1, rshift_op, insn)
dnl Matches the EXTEND shift-pair idiom and emits a single bitfield-move
dnl (sbfm/ubfm) instead of two shifts.
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1 src, immI lshift_count, immI rshift_count)
%{
  match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
  // Make sure we are not going to exceed what $4 can do.
  predicate((unsigned int)n->in(2)->get_int() <= $2
            && (unsigned int)n->in(1)->in(2)->get_int() <= $2);

  ins_cost(INSN_COST * 2);
  format %{ "$4 $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
  ins_encode %{
    // Translate the shift pair into the immr/imms operands of the
    // bitfield-move instruction so that it reproduces
    // (src << lshift) >> rshift for this operand width.
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = $2 - lshift;
    int r = (rshift - lshift) & $2;
    __ $4(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}')
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
dnl
// Bitfield extract with shift & mask
dnl BFX_INSN(mode, rshift_op, insn)
dnl Matches "(src >>> rshift) & mask" and emits one unsigned bitfield
dnl extract.  exact_log2(mask+1) assumes mask has the form 2^w - 1,
dnl i.e. contiguous low-order ones -- presumably guaranteed by the
dnl imm$1_bitmask operand; verify its definition in aarch64.ad.
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1 src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ $3(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}')
BFX_INSN(I,URShift,ubfxw)
BFX_INSN(L,URShift,ubfx)

// We can use ubfx when extending an And with a mask when we know mask
// is positive. We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Rotations

dnl EXTRACT_INSN(mode, width-1, op, insn)
dnl Matches "(src1 << lshift) op (src2 >>> rshift)" where the two shift
dnl counts sum to the operand width (the predicate below), and emits a
dnl single extr instruction.
define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
dnl Only applicable when lshift + rshift == 0 (mod operand width).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & $2));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & $2);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
')dnl
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
dnl
dnl ROL_EXPAND(mode, name, insn): expander computing a left rotate as a
dnl right rotate by the negated shift count (via rscratch1).
define(`ROL_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1 dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2 $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl
dnl ROR_EXPAND(mode, name, insn): expander for a variable right rotate.
define(`ROR_EXPAND', `
// $2 expander

instruct $2$1_rReg(iReg$1 dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "$2 $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ $3(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}')dnl
dnl
dnl ROL_INSN(mode, imm-suffix, name)
dnl Matches the rotate-left idiom "(src << shift) | (src >>> (c - shift))"
dnl for c == 0 or c == operand width.
dnl The operand class and the expander must track the mode: the original
dnl hard-coded iRegL and $3L_rReg, so the 32-bit (I) rules were declared
dnl with long registers and expanded through the 64-bit rorv expander,
dnl which is wrong for int rotates (the same defect was later fixed
dnl upstream in HotSpot).
define(`ROL_INSN', `
instruct $3$1_rReg_Var_C$2(iReg$1 dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
dnl
dnl ROR_INSN(mode, imm-suffix, name): as ROL_INSN, for rotate right.
define(`ROR_INSN', `
instruct $3$1_rReg_Var_C$2(iReg$1 dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
  match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));

  expand %{
    $3$1_rReg(dst, src, shift, cr);
  %}
%}')dnl
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
ROL_INSN(L, 0, rol)
ROL_INSN(I, _32, rol)
ROL_INSN(I, 0, rol)
ROR_EXPAND(L, ror, rorv)
ROR_EXPAND(I, ror, rorvw)
ROR_INSN(L, _64, ror)
ROR_INSN(L, 0, ror)
ROR_INSN(I, _32, ror)
ROR_INSN(I, 0, ror)

// Add/subtract (extended)
dnl
dnl ADD_SUB_CONV(from-mode, to-mode, op, insn, extend-kind)
dnl Matches "op src1 (ConvI2L src2)" and folds the sign extension into
dnl the extended-register form of add/sub.
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2 src1, iReg$1orL2I src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "$4 $dst, $src1, $5 $src2" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::$5);
  %}
  ins_pipe(ialu_reg_reg);
%}')dnl
dnl NOTE(review): the trailing ';' on the two invocations below is copied
dnl literally into the generated file; adlc has tolerated it but it
dnl could be dropped.
ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
dnl
dnl ADD_SUB_EXTENDED(mode, size, op, rshift_op, insn, extend-kind, wordsize)
dnl Matches "op src1 ((src2 << n) >> n)" with n == wordsize - size, i.e.
dnl an explicit sign/zero extension of the second operand, and folds it
dnl into the extended-register form.
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
  ins_cost(INSN_COST);
  format %{ "$5 $dst, $src1, $6 $src2" %}

  ins_encode %{
    __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::$6);
  %}
  ins_pipe(ialu_reg_reg);
%}')
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, mask, op, insn, extend-kind)
dnl Matches "op src1 (src2 & mask)" for the masks corresponding to
dnl uxtb/uxth/uxtw and folds the zero extension into the
dnl extended-register form.
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
  ins_cost(INSN_COST);
  format %{ "$4 $dst, $src1, $src2, $5" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::$5);
  %}
  ins_pipe(ialu_reg_reg);
%}')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)

// END This section of the file is automatically generated. Do not edit --------------