//
// Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, Arm Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

dnl Generate the warning
// This file is automatically generated by running "m4 aarch64_sve_ad.m4". Do not edit ----
dnl

// AArch64 SVE Architecture Description File

dnl
dnl OPERAND_VMEMORYA_IMMEDIATE_OFFSET($1,            $2,       $3     )
dnl OPERAND_VMEMORYA_IMMEDIATE_OFFSET(imm_type_abbr, imm_type, imm_len)
define(`OPERAND_VMEMORYA_IMMEDIATE_OFFSET', `
operand vmemA_imm$1Offset$3()
%{
  predicate(Address::offset_ok_for_sve_immed(n->get_$2(), $3,
            Matcher::scalable_vector_reg_size(T_BYTE)));
  match(Con$1);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}')
dnl
// 4 bit signed offset -- for predicated load/store
OPERAND_VMEMORYA_IMMEDIATE_OFFSET(I, int,  4)
OPERAND_VMEMORYA_IMMEDIATE_OFFSET(L, long, 4)
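dnl e.g. OPERAND_VMEMORYA_IMMEDIATE_OFFSET(I, int, 4) expands to an operand named
dnl vmemA_immIOffset4 that matches a ConI whose value is a valid VL-scaled 4-bit
dnl signed immediate offset for SVE predicated loads/stores.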
dnl
dnl OPERAND_VMEMORYA_INDIRECT_OFFSET($1,            $2     )
dnl OPERAND_VMEMORYA_INDIRECT_OFFSET(imm_type_abbr, imm_len)
define(`OPERAND_VMEMORYA_INDIRECT_OFFSET', `
operand vmemA_indOff$1$2(iRegP reg, vmemA_imm$1Offset$2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off, MUL VL]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    `index'(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}')
dnl
OPERAND_VMEMORYA_INDIRECT_OFFSET(I, 4)
OPERAND_VMEMORYA_INDIRECT_OFFSET(L, 4)

opclass vmemA(indirect, vmemA_indOffI4, vmemA_indOffL4);
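// vmemA groups the addressing modes accepted by the SVE predicated vector
// load/store rules below: a plain [base] and [base, #imm, MUL VL].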

source_hpp %{
  bool op_sve_supported(int opcode);
%}

source %{

  static inline BasicType vector_element_basic_type(const MachNode* n) {
    const TypeVect* vt = n->bottom_type()->is_vect();
    return vt->element_basic_type();
  }

  static inline BasicType vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
    int def_idx = use->operand_index(opnd);
    Node* def = use->in(def_idx);
    const TypeVect* vt = def->bottom_type()->is_vect();
    return vt->element_basic_type();
  }

  typedef void (C2_MacroAssembler::* sve_mem_insn_predicate)(FloatRegister Rt, Assembler::SIMD_RegVariant T,
                                                             PRegister Pg, const Address &adr);

  // Predicated load/store, with optional ptrue to all elements of given predicate register.
  static void loadStoreA_predicate(C2_MacroAssembler masm, bool is_store,
                                   FloatRegister reg, PRegister pg, BasicType bt,
                                   int opcode, Register base, int index, int size, int disp) {
    sve_mem_insn_predicate insn;
    Assembler::SIMD_RegVariant type;
    int esize = type2aelembytes(bt);
    if (index == -1) {
      assert(size == 0, "unsupported address mode: scale size = %d", size);
      switch(esize) {
      case 1:
        insn = is_store ? &C2_MacroAssembler::sve_st1b : &C2_MacroAssembler::sve_ld1b;
        type = Assembler::B;
        break;
      case 2:
        insn = is_store ? &C2_MacroAssembler::sve_st1h : &C2_MacroAssembler::sve_ld1h;
        type = Assembler::H;
        break;
      case 4:
        insn = is_store ? &C2_MacroAssembler::sve_st1w : &C2_MacroAssembler::sve_ld1w;
        type = Assembler::S;
        break;
      case 8:
        insn = is_store ? &C2_MacroAssembler::sve_st1d : &C2_MacroAssembler::sve_ld1d;
        type = Assembler::D;
        break;
      default:
        assert(false, "unsupported");
        ShouldNotReachHere();
      }
      (masm.*insn)(reg, type, pg, Address(base, disp / Matcher::scalable_vector_reg_size(T_BYTE)));
    } else {
      assert(false, "unimplemented");
      ShouldNotReachHere();
    }
  }
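  // Note: only the base + immediate form (index == -1) is generated here; the
  // immediate passed to Address is the byte displacement divided by the vector
  // register length in bytes, i.e. the MUL VL multiple used by sve_ld1/st1.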

  bool op_sve_supported(int opcode) {
    switch (opcode) {
      case Op_MulAddVS2VI:
        // No multiply reduction instructions
      case Op_MulReductionVD:
      case Op_MulReductionVF:
      case Op_MulReductionVI:
      case Op_MulReductionVL:
        // Others
      case Op_Extract:
      case Op_ExtractB:
      case Op_ExtractC:
      case Op_ExtractD:
      case Op_ExtractF:
      case Op_ExtractI:
      case Op_ExtractL:
      case Op_ExtractS:
      case Op_ExtractUB:
        return false;
      default:
        return true;
    }
  }

%}

definitions %{
  int_def SVE_COST             (200, 200);
%}

dnl
dnl ELEMENT_SHORT_CHAR($1, $2)
dnl ELEMENT_SHORT_CHAR(etype, node)
define(`ELEMENT_SHORT_CHAR',`ifelse(`$1', `T_SHORT',
  `($2->bottom_type()->is_vect()->element_basic_type() == T_SHORT ||
            ($2->bottom_type()->is_vect()->element_basic_type() == T_CHAR))',
   `($2->bottom_type()->is_vect()->element_basic_type() == $1)')')
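dnl ELEMENT_SHORT_CHAR treats T_CHAR like T_SHORT, since char and short vectors
dnl share the same 16-bit (H) element size.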
dnl

// All SVE instructions

// sve vector load/store

// Use predicated vector load/store
instruct loadVA(vecA dst, vmemA mem) %{
  predicate(UseSVE > 0 && n->as_LoadVector()->memory_size() >= 16);
  match(Set dst (LoadVector mem));
  ins_cost(SVE_COST);
  format %{ "sve_ldr $dst, $mem\t # vector (sve)" %}
  ins_encode %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStoreA_predicate(C2_MacroAssembler(&cbuf), false, dst_reg, ptrue,
                         vector_element_basic_type(this), $mem->opcode(),
                         as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
  ins_pipe(pipe_slow);
%}

instruct storeVA(vecA src, vmemA mem) %{
  predicate(UseSVE > 0 && n->as_StoreVector()->memory_size() >= 16);
  match(Set mem (StoreVector mem src));
  ins_cost(SVE_COST);
  format %{ "sve_str $mem, $src\t # vector (sve)" %}
  ins_encode %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStoreA_predicate(C2_MacroAssembler(&cbuf), true, src_reg, ptrue,
                         vector_element_basic_type(this, $src), $mem->opcode(),
                         as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
  ins_pipe(pipe_slow);
%}
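
// Vector accesses smaller than 16 bytes are intentionally not matched here; they
// are expected to be handled by the fixed-size NEON vector rules in aarch64.ad.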

dnl
dnl UNARY_OP_TRUE_PREDICATE_ETYPE($1,        $2,      $3,           $4,   $5,          $6  )
dnl UNARY_OP_TRUE_PREDICATE_ETYPE(insn_name, op_name, element_type, size, min_vec_len, insn)
define(`UNARY_OP_TRUE_PREDICATE_ETYPE', `
instruct $1(vecA dst, vecA src) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $5 &&
            n->bottom_type()->is_vect()->element_basic_type() == $3);
  match(Set dst ($2 src));
  ins_cost(SVE_COST);
  format %{ "$6 $dst, $src\t# vector (sve) ($4)" %}
  ins_encode %{
    __ $6(as_FloatRegister($dst$$reg), __ $4,
         ptrue, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve abs
UNARY_OP_TRUE_PREDICATE_ETYPE(vabsAB, AbsVB, T_BYTE,   B, 16, sve_abs)
UNARY_OP_TRUE_PREDICATE_ETYPE(vabsAS, AbsVS, T_SHORT,  H, 8,  sve_abs)
UNARY_OP_TRUE_PREDICATE_ETYPE(vabsAI, AbsVI, T_INT,    S, 4,  sve_abs)
UNARY_OP_TRUE_PREDICATE_ETYPE(vabsAL, AbsVL, T_LONG,   D, 2,  sve_abs)
UNARY_OP_TRUE_PREDICATE_ETYPE(vabsAF, AbsVF, T_FLOAT,  S, 4,  sve_fabs)
UNARY_OP_TRUE_PREDICATE_ETYPE(vabsAD, AbsVD, T_DOUBLE, D, 2,  sve_fabs)
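// The min_vec_len arguments above (16, 8, 4 or 2 elements for B/H/S/D) all
// describe a 128-bit vector, matching the 16-byte lower bound used by the
// SVE load/store rules.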
dnl
dnl BINARY_OP_UNPREDICATED($1,        $2,      $3,   $4,          $5  )
dnl BINARY_OP_UNPREDICATED(insn_name, op_name, size, min_vec_len, insn)
define(`BINARY_OP_UNPREDICATED', `
instruct $1(vecA dst, vecA src1, vecA src2) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $4);
  match(Set dst ($2 src1 src2));
  ins_cost(SVE_COST);
  format %{ "$5 $dst, $src1, $src2\t # vector (sve) ($3)" %}
  ins_encode %{
    __ $5(as_FloatRegister($dst$$reg), __ $3,
         as_FloatRegister($src1$$reg),
         as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve add
BINARY_OP_UNPREDICATED(vaddAB, AddVB, B, 16, sve_add)
BINARY_OP_UNPREDICATED(vaddAS, AddVS, H, 8,  sve_add)
BINARY_OP_UNPREDICATED(vaddAI, AddVI, S, 4,  sve_add)
BINARY_OP_UNPREDICATED(vaddAL, AddVL, D, 2,  sve_add)
BINARY_OP_UNPREDICATED(vaddAF, AddVF, S, 4,  sve_fadd)
BINARY_OP_UNPREDICATED(vaddAD, AddVD, D, 2,  sve_fadd)
dnl
dnl BINARY_OP_UNSIZED($1,        $2,      $3,            $4  )
dnl BINARY_OP_UNSIZED(insn_name, op_name, min_vec_bytes, insn)
define(`BINARY_OP_UNSIZED', `
instruct $1(vecA dst, vecA src1, vecA src2) %{
  predicate(UseSVE > 0 && n->as_Vector()->length_in_bytes() >= $3);
  match(Set dst ($2 src1 src2));
  ins_cost(SVE_COST);
  format %{ "$4  $dst, $src1, $src2\t# vector (sve)" %}
  ins_encode %{
    __ $4(as_FloatRegister($dst$$reg),
         as_FloatRegister($src1$$reg),
         as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve and
BINARY_OP_UNSIZED(vandA, AndV, 16, sve_and)

// sve or
BINARY_OP_UNSIZED(vorA, OrV, 16, sve_orr)

// sve xor
BINARY_OP_UNSIZED(vxorA, XorV, 16, sve_eor)
dnl
dnl VDIVF($1,          $2,   $3         )
dnl VDIVF(name_suffix, size, min_vec_len)
define(`VDIVF', `
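// dst_src1 = dst_src1 / src2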
instruct vdivA$1(vecA dst_src1, vecA src2) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (DivV$1 dst_src1 src2));
  ins_cost(SVE_COST);
  format %{ "sve_fdiv  $dst_src1, $dst_src1, $src2\t# vector (sve) ($2)" %}
  ins_encode %{
    __ sve_fdiv(as_FloatRegister($dst_src1$$reg), __ $2,
         ptrue, as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve float div
VDIVF(F, S, 4)
VDIVF(D, D, 2)

dnl
dnl BINARY_OP_TRUE_PREDICATE_ETYPE($1,        $2,      $3,           $4,   $5,          $6  )
dnl BINARY_OP_TRUE_PREDICATE_ETYPE(insn_name, op_name, element_type, size, min_vec_len, insn)
define(`BINARY_OP_TRUE_PREDICATE_ETYPE', `
instruct $1(vecA dst_src1, vecA src2) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $5 &&
            n->bottom_type()->is_vect()->element_basic_type() == $3);
  match(Set dst_src1 ($2 dst_src1 src2));
  ins_cost(SVE_COST);
  format %{ "$6 $dst_src1, $dst_src1, $src2\t # vector (sve) ($4)" %}
  ins_encode %{
    __ $6(as_FloatRegister($dst_src1$$reg), __ $4,
         ptrue, as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve max
BINARY_OP_TRUE_PREDICATE_ETYPE(vmaxAF, MaxV, T_FLOAT,  S, 4,  sve_fmax)
BINARY_OP_TRUE_PREDICATE_ETYPE(vmaxAD, MaxV, T_DOUBLE, D, 2,  sve_fmax)
BINARY_OP_TRUE_PREDICATE_ETYPE(vminAF, MinV, T_FLOAT,  S, 4,  sve_fmin)
BINARY_OP_TRUE_PREDICATE_ETYPE(vminAD, MinV, T_DOUBLE, D, 2,  sve_fmin)

dnl
dnl VFMLA($1,          $2,   $3         )
dnl VFMLA(name_suffix, size, min_vec_len)
define(`VFMLA', `
// dst_src1 = dst_src1 + src2 * src3
instruct vfmlaA$1(vecA dst_src1, vecA src2, vecA src3) %{
  predicate(UseFMA && UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (FmaV$1 dst_src1 (Binary src2 src3)));
  ins_cost(SVE_COST);
  format %{ "sve_fmla $dst_src1, $src2, $src3\t # vector (sve) ($2)" %}
  ins_encode %{
    __ sve_fmla(as_FloatRegister($dst_src1$$reg), __ $2,
         ptrue, as_FloatRegister($src2$$reg), as_FloatRegister($src3$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve fmla
VFMLA(F, S, 4)
VFMLA(D, D, 2)

dnl
dnl VFMLS($1,          $2,   $3         )
dnl VFMLS(name_suffix, size, min_vec_len)
define(`VFMLS', `
// dst_src1 = dst_src1 + -src2 * src3
// dst_src1 = dst_src1 + src2 * -src3
instruct vfmlsA$1(vecA dst_src1, vecA src2, vecA src3) %{
  predicate(UseFMA && UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (FmaV$1 dst_src1 (Binary (NegV$1 src2) src3)));
  match(Set dst_src1 (FmaV$1 dst_src1 (Binary src2 (NegV$1 src3))));
  ins_cost(SVE_COST);
  format %{ "sve_fmls $dst_src1, $src2, $src3\t # vector (sve) ($2)" %}
  ins_encode %{
    __ sve_fmls(as_FloatRegister($dst_src1$$reg), __ $2,
         ptrue, as_FloatRegister($src2$$reg), as_FloatRegister($src3$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve fmls
VFMLS(F, S, 4)
VFMLS(D, D, 2)

dnl
dnl VFNMLA($1,          $2,   $3         )
dnl VFNMLA(name_suffix, size, min_vec_len)
define(`VFNMLA', `
// dst_src1 = -dst_src1 + -src2 * src3
// dst_src1 = -dst_src1 + src2 * -src3
instruct vfnmlaA$1(vecA dst_src1, vecA src2, vecA src3) %{
  predicate(UseFMA && UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (FmaV$1 (NegV$1 dst_src1) (Binary (NegV$1 src2) src3)));
  match(Set dst_src1 (FmaV$1 (NegV$1 dst_src1) (Binary src2 (NegV$1 src3))));
  ins_cost(SVE_COST);
  format %{ "sve_fnmla $dst_src1, $src2, $src3\t # vector (sve) ($2)" %}
  ins_encode %{
    __ sve_fnmla(as_FloatRegister($dst_src1$$reg), __ $2,
         ptrue, as_FloatRegister($src2$$reg), as_FloatRegister($src3$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve fnmla
VFNMLA(F, S, 4)
VFNMLA(D, D, 2)

dnl
dnl VFNMLS($1,          $2,   $3         )
dnl VFNMLS(name_suffix, size, min_vec_len)
define(`VFNMLS', `
// dst_src1 = -dst_src1 + src2 * src3
instruct vfnmlsA$1(vecA dst_src1, vecA src2, vecA src3) %{
  predicate(UseFMA && UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (FmaV$1 (NegV$1 dst_src1) (Binary src2 src3)));
  ins_cost(SVE_COST);
  format %{ "sve_fnmls $dst_src1, $src2, $src3\t # vector (sve) ($2)" %}
  ins_encode %{
    __ sve_fnmls(as_FloatRegister($dst_src1$$reg), __ $2,
         ptrue, as_FloatRegister($src2$$reg), as_FloatRegister($src3$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve fnmls
VFNMLS(F, S, 4)
VFNMLS(D, D, 2)

dnl
dnl VMLA($1,          $2,   $3         )
dnl VMLA(name_suffix, size, min_vec_len)
define(`VMLA', `
// dst_src1 = dst_src1 + src2 * src3
instruct vmlaA$1(vecA dst_src1, vecA src2, vecA src3)
%{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (AddV$1 dst_src1 (MulV$1 src2 src3)));
  ins_cost(SVE_COST);
  format %{ "sve_mla $dst_src1, $src2, $src3\t # vector (sve) ($2)" %}
  ins_encode %{
    __ sve_mla(as_FloatRegister($dst_src1$$reg), __ $2,
      ptrue, as_FloatRegister($src2$$reg), as_FloatRegister($src3$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve mla
VMLA(B, B, 16)
VMLA(S, H, 8)
VMLA(I, S, 4)
VMLA(L, D, 2)

dnl
dnl VMLS($1,          $2,   $3         )
dnl VMLS(name_suffix, size, min_vec_len)
define(`VMLS', `
// dst_src1 = dst_src1 - src2 * src3
instruct vmlsA$1(vecA dst_src1, vecA src2, vecA src3)
%{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $3);
  match(Set dst_src1 (SubV$1 dst_src1 (MulV$1 src2 src3)));
  ins_cost(SVE_COST);
  format %{ "sve_mls $dst_src1, $src2, $src3\t # vector (sve) ($2)" %}
  ins_encode %{
    __ sve_mls(as_FloatRegister($dst_src1$$reg), __ $2,
      ptrue, as_FloatRegister($src2$$reg), as_FloatRegister($src3$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve mls
VMLS(B, B, 16)
VMLS(S, H, 8)
VMLS(I, S, 4)
VMLS(L, D, 2)

dnl
dnl BINARY_OP_TRUE_PREDICATE($1,        $2,      $3,   $4,          $5  )
dnl BINARY_OP_TRUE_PREDICATE(insn_name, op_name, size, min_vec_len, insn)
define(`BINARY_OP_TRUE_PREDICATE', `
instruct $1(vecA dst_src1, vecA src2) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $4);
  match(Set dst_src1 ($2 dst_src1 src2));
  ins_cost(SVE_COST);
  format %{ "$5 $dst_src1, $dst_src1, $src2\t # vector (sve) ($3)" %}
  ins_encode %{
    __ $5(as_FloatRegister($dst_src1$$reg), __ $3,
         ptrue, as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve mul
BINARY_OP_TRUE_PREDICATE(vmulAB, MulVB, B, 16, sve_mul)
BINARY_OP_TRUE_PREDICATE(vmulAS, MulVS, H, 8,  sve_mul)
BINARY_OP_TRUE_PREDICATE(vmulAI, MulVI, S, 4,  sve_mul)
BINARY_OP_TRUE_PREDICATE(vmulAL, MulVL, D, 2,  sve_mul)
BINARY_OP_UNPREDICATED(vmulAF, MulVF, S, 4, sve_fmul)
BINARY_OP_UNPREDICATED(vmulAD, MulVD, D, 2, sve_fmul)
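// Integer multiply is matched with the predicated destructive form (sve_mul),
// while floating-point multiply uses the unpredicated three-register sve_fmul.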

dnl
dnl UNARY_OP_TRUE_PREDICATE($1,        $2,      $3,   $4,            $5  )
dnl UNARY_OP_TRUE_PREDICATE(insn_name, op_name, size, min_vec_bytes, insn)
define(`UNARY_OP_TRUE_PREDICATE', `
instruct $1(vecA dst, vecA src) %{
  predicate(UseSVE > 0 && n->as_Vector()->length_in_bytes() >= $4);
  match(Set dst ($2 src));
  ins_cost(SVE_COST);
  format %{ "$5 $dst, $src\t# vector (sve) ($3)" %}
  ins_encode %{
    __ $5(as_FloatRegister($dst$$reg), __ $3,
         ptrue, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve fneg
UNARY_OP_TRUE_PREDICATE(vnegAF, NegVF, S, 16, sve_fneg)
UNARY_OP_TRUE_PREDICATE(vnegAD, NegVD, D, 16, sve_fneg)

// sve popcount vector

instruct vpopcountAI(vecA dst, vecA src) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= 4);
  match(Set dst (PopCountVI src));
  format %{ "sve_cnt $dst, $src\t# vector (sve) (S)" %}
  ins_encode %{
    __ sve_cnt(as_FloatRegister($dst$$reg), __ S, ptrue, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_slow);
%}

dnl
dnl REDUCE_ADD($1,        $2,      $3,      $4,      $5,   $6,        $7   )
dnl REDUCE_ADD(insn_name, op_name, reg_dst, reg_src, size, elem_type, insn1)
define(`REDUCE_ADD', `
instruct $1($3 dst, $4 src1, vecA src2, vRegD tmp) %{
  predicate(UseSVE > 0 && n->in(2)->bottom_type()->is_vect()->length_in_bytes() >= 16 &&
            ELEMENT_SHORT_CHAR($6, n->in(2)));
  match(Set dst ($2 src1 src2));
  effect(TEMP_DEF dst, TEMP tmp);
  ins_cost(SVE_COST);
  format %{ "sve_uaddv $tmp, $src2\t# vector (sve) ($5)\n\t"
            "umov  $dst, $tmp, $5, 0\n\t"
            "$7  $dst, $dst, $src1\t # add reduction $5" %}
  ins_encode %{
    __ sve_uaddv(as_FloatRegister($tmp$$reg), __ $5,
         ptrue, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ $5, 0);
    __ $7($dst$$Register, $dst$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
dnl REDUCE_ADDF($1,        $2,      $3,      $4  )
dnl REDUCE_ADDF(insn_name, op_name, reg_dst, size)
define(`REDUCE_ADDF', `
instruct $1($3 src1_dst, vecA src2) %{
  predicate(UseSVE > 0 && n->in(2)->bottom_type()->is_vect()->length_in_bytes() >= 16);
  match(Set src1_dst ($2 src1_dst src2));
  ins_cost(SVE_COST);
  format %{ "sve_fadda $src1_dst, $src1_dst, $src2\t# vector (sve) ($4)" %}
  ins_encode %{
    __ sve_fadda(as_FloatRegister($src1_dst$$reg), __ $4,
         ptrue, as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
// sve add reduction
REDUCE_ADD(reduce_addAI, AddReductionVI, iRegINoSp, iRegIorL2I, S, T_INT, addw)
REDUCE_ADD(reduce_addAL, AddReductionVL, iRegLNoSp, iRegL, D, T_LONG, add)
REDUCE_ADDF(reduce_addAF, AddReductionVF, vRegF, S)
REDUCE_ADDF(reduce_addAD, AddReductionVD, vRegD, D)
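// sve_fadda performs a strictly-ordered floating-point add reduction, accumulating
// the elements of src2 into src1_dst in element order.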

dnl
dnl REDUCE_FMINMAX($1,      $2,          $3,           $4,   $5         )
dnl REDUCE_FMINMAX(min_max, name_suffix, element_type, size, reg_src_dst)
define(`REDUCE_FMINMAX', `
instruct reduce_$1A$2($5 dst, $5 src1, vecA src2) %{
  predicate(UseSVE > 0 && n->in(2)->bottom_type()->is_vect()->element_basic_type() == $3 &&
            n->in(2)->bottom_type()->is_vect()->length_in_bytes() >= 16);
  match(Set dst (translit($1, `m', `M')ReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "sve_f$1v $dst, $src2\t# vector (sve) ($4)\n\t"
            "f$1s $dst, $dst, $src1\t # $1 reduction $2" %}
  ins_encode %{
    __ sve_f$1v(as_FloatRegister($dst$$reg), __ $4,
         ptrue, as_FloatRegister($src2$$reg));
    __ f`$1'translit($4, `SD', `sd')(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
// sve max reduction
REDUCE_FMINMAX(max, F, T_FLOAT,  S, vRegF)
REDUCE_FMINMAX(max, D, T_DOUBLE, D, vRegD)

// sve min reduction
REDUCE_FMINMAX(min, F, T_FLOAT,  S, vRegF)
REDUCE_FMINMAX(min, D, T_DOUBLE, D, vRegD)

// sve vector Math.rint, floor, ceil

instruct vroundAD(vecA dst, vecA src, immI rmode) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= 2 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "sve_frint $dst, $src, $rmode\t# vector (sve) (D)" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ sve_frintn(as_FloatRegister($dst$$reg), __ D,
             ptrue, as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ sve_frintm(as_FloatRegister($dst$$reg), __ D,
             ptrue, as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ sve_frintp(as_FloatRegister($dst$$reg), __ D,
             ptrue, as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(pipe_slow);
%}
dnl
dnl REPLICATE($1,        $2,      $3,      $4,   $5         )
dnl REPLICATE(insn_name, op_name, reg_src, size, min_vec_len)
define(`REPLICATE', `
instruct $1(vecA dst, $3 src) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $5);
  match(Set dst ($2 src));
  ins_cost(SVE_COST);
  format %{ "sve_dup  $dst, $src\t# vector (sve) ($4)" %}
  ins_encode %{
    __ sve_dup(as_FloatRegister($dst$$reg), __ $4, as_Register($src$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
dnl REPLICATE_IMM8($1,        $2,      $3,       $4,   $5         )
dnl REPLICATE_IMM8(insn_name, op_name, imm_type, size, min_vec_len)
define(`REPLICATE_IMM8', `
instruct $1(vecA dst, $3 con) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $5);
  match(Set dst ($2 con));
  ins_cost(SVE_COST);
  format %{ "sve_dup  $dst, $con\t# vector (sve) ($4)" %}
  ins_encode %{
    __ sve_dup(as_FloatRegister($dst$$reg), __ $4, $con$$constant);
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
dnl FREPLICATE($1,        $2,      $3,      $4,   $5         )
dnl FREPLICATE(insn_name, op_name, reg_src, size, min_vec_len)
define(`FREPLICATE', `
instruct $1(vecA dst, $3 src) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $5);
  match(Set dst ($2 src));
  ins_cost(SVE_COST);
  format %{ "sve_cpy  $dst, $src\t# vector (sve) ($4)" %}
  ins_encode %{
    __ sve_cpy(as_FloatRegister($dst$$reg), __ $4,
         ptrue, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve replicate
REPLICATE(replicateAB, ReplicateB, iRegIorL2I, B, 16)
REPLICATE(replicateAS, ReplicateS, iRegIorL2I, H, 8)
REPLICATE(replicateAI, ReplicateI, iRegIorL2I, S, 4)
REPLICATE(replicateAL, ReplicateL, iRegL,      D, 2)

REPLICATE_IMM8(replicateAB_imm8, ReplicateB, immI8,        B, 16)
REPLICATE_IMM8(replicateAS_imm8, ReplicateS, immI8_shift8, H, 8)
REPLICATE_IMM8(replicateAI_imm8, ReplicateI, immI8_shift8, S, 4)
REPLICATE_IMM8(replicateAL_imm8, ReplicateL, immL8_shift8, D, 2)
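// sve_dup (immediate) accepts a signed 8-bit immediate, optionally shifted left
// by 8 bits, hence the immI8 / immI8_shift8 / immL8_shift8 operand types above.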

FREPLICATE(replicateAF, ReplicateF, vRegF, S, 4)
FREPLICATE(replicateAD, ReplicateD, vRegD, D, 2)
dnl
dnl VSHIFT_TRUE_PREDICATE($1,        $2,      $3,   $4,          $5  )
dnl VSHIFT_TRUE_PREDICATE(insn_name, op_name, size, min_vec_len, insn)
define(`VSHIFT_TRUE_PREDICATE', `
instruct $1(vecA dst, vecA shift) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $4);
  match(Set dst ($2 dst shift));
  ins_cost(SVE_COST);
  format %{ "$5 $dst, $dst, $shift\t# vector (sve) ($3)" %}
  ins_encode %{
    __ $5(as_FloatRegister($dst$$reg), __ $3,
         ptrue, as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl
dnl
dnl VSHIFT_IMM_UNPREDICATE($1,        $2,      $3,   $4,          $5  )
dnl VSHIFT_IMM_UNPREDICATE(insn_name, op_name, size, min_vec_len, insn)
define(`VSHIFT_IMM_UNPREDICATE', `
instruct $1(vecA dst, vecA src, immI shift) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $4);
  match(Set dst ($2 src shift));
  ins_cost(SVE_COST);
  format %{ "$5 $dst, $src, $shift\t# vector (sve) ($3)" %}
  ins_encode %{
    int con = (int)$shift$$constant;dnl
ifelse(eval(index(`$1', `vasr') == 0 || index(`$1', `vlsr') == 0), 1, `
    if (con == 0) {
      __ sve_orr(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
           as_FloatRegister($src$$reg));
      return;
    }')dnl
ifelse(eval(index(`$1', `vasr') == 0), 1, `ifelse(eval(index(`$3', `B') == 0), 1, `
    if (con >= 8) con = 7;')ifelse(eval(index(`$3', `H') == 0), 1, `
    if (con >= 16) con = 15;')')dnl
ifelse(eval((index(`$1', `vlsl') == 0 || index(`$1', `vlsr') == 0) && (index(`$3', `B') == 0 || index(`$3', `H') == 0)), 1, `
    if (con >= 8) {
      __ sve_eor(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
           as_FloatRegister($src$$reg));
      return;
    }')
    __ $5(as_FloatRegister($dst$$reg), __ $3,
         as_FloatRegister($src$$reg), con);
  %}
  ins_pipe(pipe_slow);
%}')dnl
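dnl The *_imm rules special-case the shift constant at code-generation time: a zero
dnl shift for right shifts is emitted as a plain register copy (sve_orr), arithmetic
dnl right shifts on B/H elements are clamped to 7/15, and over-wide left/logical-right
dnl shifts on B/H elements are emitted as sve_eor to zero the destination.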
dnl
dnl VSHIFT_COUNT($1,        $2,   $3,          $4  )
dnl VSHIFT_COUNT(insn_name, size, min_vec_len, type)
define(`VSHIFT_COUNT', `
instruct $1(vecA dst, iRegIorL2I cnt) %{
  predicate(UseSVE > 0 && n->as_Vector()->length() >= $3 &&
            ELEMENT_SHORT_CHAR($4, n));
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "sve_dup $dst, $cnt\t# vector shift count (sve) ($2)" %}
  ins_encode %{
    __ sve_dup(as_FloatRegister($dst$$reg), __ $2, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_slow);
%}')dnl

// sve shift
VSHIFT_TRUE_PREDICATE(vasrAB, RShiftVB,  B, 16, sve_asr)
VSHIFT_TRUE_PREDICATE(vasrAS, RShiftVS,  H,  8, sve_asr)
VSHIFT_TRUE_PREDICATE(vasrAI, RShiftVI,  S,  4, sve_asr)
VSHIFT_TRUE_PREDICATE(vasrAL, RShiftVL,  D,  2, sve_asr)
VSHIFT_TRUE_PREDICATE(vlslAB, LShiftVB,  B, 16, sve_lsl)
VSHIFT_TRUE_PREDICATE(vlslAS, LShiftVS,  H,  8, sve_lsl)
VSHIFT_TRUE_PREDICATE(vlslAI, LShiftVI,  S,  4, sve_lsl)
VSHIFT_TRUE_PREDICATE(vlslAL, LShiftVL,  D,  2, sve_lsl)
VSHIFT_TRUE_PREDICATE(vlsrAB, URShiftVB, B, 16, sve_lsr)
VSHIFT_TRUE_PREDICATE(vlsrAS, URShiftVS, H,  8, sve_lsr)
VSHIFT_TRUE_PREDICATE(vlsrAI, URShiftVI, S,  4, sve_lsr)
VSHIFT_TRUE_PREDICATE(vlsrAL, URShiftVL, D,  2, sve_lsr)
VSHIFT_IMM_UNPREDICATE(vasrAB_imm, RShiftVB,  B, 16, sve_asr)
VSHIFT_IMM_UNPREDICATE(vasrAS_imm, RShiftVS,  H,  8, sve_asr)
VSHIFT_IMM_UNPREDICATE(vasrAI_imm, RShiftVI,  S,  4, sve_asr)
VSHIFT_IMM_UNPREDICATE(vasrAL_imm, RShiftVL,  D,  2, sve_asr)
VSHIFT_IMM_UNPREDICATE(vlsrAB_imm, URShiftVB, B, 16, sve_lsr)
VSHIFT_IMM_UNPREDICATE(vlsrAS_imm, URShiftVS, H,  8, sve_lsr)
VSHIFT_IMM_UNPREDICATE(vlsrAI_imm, URShiftVI, S,  4, sve_lsr)
VSHIFT_IMM_UNPREDICATE(vlsrAL_imm, URShiftVL, D,  2, sve_lsr)
VSHIFT_IMM_UNPREDICATE(vlslAB_imm, LShiftVB,  B, 16, sve_lsl)
VSHIFT_IMM_UNPREDICATE(vlslAS_imm, LShiftVS,  H,  8, sve_lsl)
VSHIFT_IMM_UNPREDICATE(vlslAI_imm, LShiftVI,  S,  4, sve_lsl)
VSHIFT_IMM_UNPREDICATE(vlslAL_imm, LShiftVL,  D,  2, sve_lsl)
VSHIFT_COUNT(vshiftcntAB, B, 16, T_BYTE)
VSHIFT_COUNT(vshiftcntAS, H,  8, T_SHORT)
VSHIFT_COUNT(vshiftcntAI, S,  4, T_INT)
VSHIFT_COUNT(vshiftcntAL, D,  2, T_LONG)

// sve sqrt
UNARY_OP_TRUE_PREDICATE(vsqrtAF, SqrtVF, S, 16, sve_fsqrt)
UNARY_OP_TRUE_PREDICATE(vsqrtAD, SqrtVD, D, 16, sve_fsqrt)

// sve sub
BINARY_OP_UNPREDICATED(vsubAB, SubVB, B, 16, sve_sub)
BINARY_OP_UNPREDICATED(vsubAS, SubVS, H, 8, sve_sub)
BINARY_OP_UNPREDICATED(vsubAI, SubVI, S, 4, sve_sub)
BINARY_OP_UNPREDICATED(vsubAL, SubVL, D, 2, sve_sub)
BINARY_OP_UNPREDICATED(vsubAF, SubVF, S, 4, sve_fsub)
BINARY_OP_UNPREDICATED(vsubAD, SubVD, D, 2, sve_fsub)