1 //
   2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Note: r8 and r9 are deliberately not defined here; they are reserved
// as scratch registers and kept invisible to the register allocator.
// Each 64 bit register is described as a real lower 32 bit half plus a
// virtual upper half (the _H entries) for the allocator's benefit.
// Column 1 is the Java save type, column 2 the C-convention save type.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: save-on-call for Java, but callee saved (SOE) under the
// platform C calling convention.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// Registers with dedicated VM roles follow.
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
// Each Vn is a 128 bit SIMD/FP register described to the allocator as
// four 32 bit slices: Vn (bits 0-31), Vn_H (->next()), Vn_J (->next(2))
// and Vn_K (->next(3)). Scalar float/double uses only the low slices.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// The listing order within each chunk encodes the allocator's
// selection priority (highest first — see the comment above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// Condition codes live in their own chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit general purpose registers
// Lists every defined 32 bit GPR including SP (R31); r8/r9 are not
// defined at all and so cannot appear here.
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
 471 
 472 
 473 // Class for all 32 bit integer registers (excluding SP which
 474 // will never be used as an integer register)
// Membership is supplied at runtime by C++ code (_ANY_REG32_mask)
// rather than by an explicit register list.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
 493 
 494 // Class for all 64 bit general purpose registers
// 64 bit view of the GPR file: each register paired with its virtual
// upper half (_H).
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 527 
 528 // Class for all long integer registers (including SP)
// Membership supplied at runtime by C++ code (_ANY_REG_mask).
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
// NOTE(review): R27 (heapbase) and R29 (fp) are not listed here even
// though chunk0 groups them with the non-allocatable registers —
// presumably they are only conditionally reserved; confirm against the
// matcher/frame setup elsewhere in this file.
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);

// Class for all non-special integer registers; mask computed in C++.
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers; mask computed in C++.
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
 556 
 557 // Class for 64 bit register r0
// Singleton 64 bit classes used to pin operands to specific registers
// (argument registers, scratch registers, and VM-reserved registers).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers; mask computed in C++.
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers; mask computed in C++.
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
 636 
 637 // Class for all float registers
// Single-precision view: only the low 32 bit slice (Vn) of each
// vector register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 672 
 673 // Double precision float registers have virtual `high halves' that
 674 // are needed by the allocator.
 675 // Class for all double registers
// Double-precision view: low 64 bits of each vector register, i.e. the
// real low slice (Vn) plus its virtual high half (Vn_H).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 710 
 711 // Class for all 64bit vector registers
// 64 bit vector view.
// NOTE(review): membership is identical to double_reg; kept as a
// distinct class so vector operands can be typed separately.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 746 
 747 // Class for all 128bit vector registers
// 128 bit vector view: all four 32 bit slices of each vector register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 782 
 783 // Class for 128 bit register v0
// Class for 128 bit register v0 (low 64 bits only: V0/V0_H)
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1 (low 64 bits only)
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2 (low 64 bits only)
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3 (low 64 bits only)
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4 (low 64 bits only)
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5 (low 64 bits only)
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6 (low 64 bits only)
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7 (low 64 bits only)
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8 (low 64 bits only)
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9 (low 64 bits only)
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10 (low 64 bits only)
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11 (low 64 bits only)
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12 (low 64 bits only)
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13 (low 64 bits only)
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14 (low 64 bits only)
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15 (low 64 bits only)
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16 (low 64 bits only)
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17 (low 64 bits only)
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18 (low 64 bits only)
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19 (low 64 bits only)
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20 (low 64 bits only)
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21 (low 64 bits only)
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22 (low 64 bits only)
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23 (low 64 bits only)
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24 (low 64 bits only)
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25 (low 64 bits only)
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26 (low 64 bits only)
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27 (low 64 bits only)
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28 (low 64 bits only)
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29 (low 64 bits only)
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30 (low 64 bits only)
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31 (low 64 bits only)
reg_class v31_reg(
    V31, V31_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 945 
 946 %}
 947 
 948 //----------DEFINITION BLOCK---------------------------------------------------
 949 // Define name --> value mappings to inform the ADLC of an integer valued name
 950 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 951 // Format:
 952 //        int_def  <name>         ( <int_value>, <expression>);
 953 // Generated Code in ad_<arch>.hpp
 954 //        #define  <name>   (<expression>)
 955 //        // value == <int_value>
 956 // Generated code in ad_<arch>.cpp adlc_verification()
 957 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 958 //
 959 
 960 // we follow the ppc-aix port in using a simple cost model which ranks
 961 // register operations as cheap, memory ops as more expensive and
 962 // branches as most expensive. the first two have a low as well as a
 963 // normal cost. huge cost appears to be a way of saying don't do
 964 // something
 965 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are twice as expensive as a plain register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references (acquiring/releasing accesses and their barriers)
  // are ranked an order of magnitude above a register op.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 973 
 974 
 975 //----------SOURCE BLOCK-------------------------------------------------------
 976 // This is a block of C++ code which provides values, functions, and
 977 // definitions necessary in the rest of the architecture description
 978 
 979 source_hpp %{
 980 
 981 #include "asm/macroAssembler.hpp"
 982 #include "gc/shared/cardTable.hpp"
 983 #include "gc/shared/cardTableBarrierSet.hpp"
 984 #include "gc/shared/collectedHeap.hpp"
 985 #include "opto/addnode.hpp"
 986 #include "opto/convertnode.hpp"
 987 
// Register masks derived at VM startup by reg_mask_init() from the
// adlc-generated masks; membership of rheapbase (r27) and rfp (r29)
// depends on the UseCompressedOops / PreserveFramePointer flags.
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
 994 
// Trampoline-stub sizing hooks required by the shared C2 output code.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1012 
// Exception/deopt handler stub sizing and emission hooks required by the
// shared C2 output code; the emit_* bodies live elsewhere in this file.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): budget is 4 instruction slots, so the far branch is
    // assumed to take up to 3 instructions — confirm against
    // MacroAssembler::far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1029 
 // Forward declarations of the volatile-access predicates implemented in
 // the source block below; they are consulted by instruct rule predicates.

 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1048 %}
1049 
1050 source %{
1051 
  // Derived RegMask with conditionally allocatable registers
  // (definitions for the externs declared in the source_hpp block;
  // populated once by reg_mask_init() below).

  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
1060 
  // Build the derived register masks above from the adlc-generated ones.
  // Called once at startup; must run after the generated masks exist and
  // after the UseCompressedOops / PreserveFramePointer flags are settled.
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // ANY_REG32 is every 32-bit register except the stack pointer.
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // NO_SPECIAL_* start from everything and subtract the registers the
    // generated _NON_ALLOCATABLE_* masks reserve.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on, compressed klass
    // pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops) {
      // 32-bit mask: drop r27's single slot; 64-bit masks: drop the whole
      // heapbase register pair via the generated mask.
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
1097 
  // Optimization of volatile gets and puts
1099   // -------------------------------------
1100   //
1101   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1102   // use to implement volatile reads and writes. For a volatile read
1103   // we simply need
1104   //
1105   //   ldar<x>
1106   //
1107   // and for a volatile write we need
1108   //
1109   //   stlr<x>
1110   //
1111   // Alternatively, we can implement them by pairing a normal
1112   // load/store with a memory barrier. For a volatile read we need
1113   //
1114   //   ldr<x>
1115   //   dmb ishld
1116   //
1117   // for a volatile write
1118   //
1119   //   dmb ish
1120   //   str<x>
1121   //   dmb ish
1122   //
1123   // We can also use ldaxr and stlxr to implement compare and swap CAS
1124   // sequences. These are normally translated to an instruction
1125   // sequence like the following
1126   //
1127   //   dmb      ish
1128   // retry:
1129   //   ldxr<x>   rval raddr
1130   //   cmp       rval rold
1131   //   b.ne done
1132   //   stlxr<x>  rval, rnew, rold
1133   //   cbnz      rval retry
1134   // done:
1135   //   cset      r0, eq
1136   //   dmb ishld
1137   //
1138   // Note that the exclusive store is already using an stlxr
1139   // instruction. That is required to ensure visibility to other
1140   // threads of the exclusive write (assuming it succeeds) before that
1141   // of any subsequent writes.
1142   //
1143   // The following instruction sequence is an improvement on the above
1144   //
1145   // retry:
1146   //   ldaxr<x>  rval raddr
1147   //   cmp       rval rold
1148   //   b.ne done
1149   //   stlxr<x>  rval, rnew, rold
1150   //   cbnz      rval retry
1151   // done:
1152   //   cset      r0, eq
1153   //
1154   // We don't need the leading dmb ish since the stlxr guarantees
1155   // visibility of prior writes in the case that the swap is
1156   // successful. Crucially we don't have to worry about the case where
1157   // the swap is not successful since no valid program should be
1158   // relying on visibility of prior changes by the attempting thread
1159   // in the case where the CAS fails.
1160   //
1161   // Similarly, we don't need the trailing dmb ishld if we substitute
1162   // an ldaxr instruction since that will provide all the guarantees we
1163   // require regarding observation of changes made by other threads
1164   // before any change to the CAS address observed by the load.
1165   //
1166   // In order to generate the desired instruction sequence we need to
1167   // be able to identify specific 'signature' ideal graph node
1168   // sequences which i) occur as a translation of a volatile reads or
1169   // writes or CAS operations and ii) do not occur through any other
1170   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1172   // sequences to the desired machine code sequences. Selection of the
1173   // alternative rules can be implemented by predicates which identify
1174   // the relevant node sequences.
1175   //
1176   // The ideal graph generator translates a volatile read to the node
1177   // sequence
1178   //
1179   //   LoadX[mo_acquire]
1180   //   MemBarAcquire
1181   //
1182   // As a special case when using the compressed oops optimization we
1183   // may also see this variant
1184   //
1185   //   LoadN[mo_acquire]
1186   //   DecodeN
1187   //   MemBarAcquire
1188   //
1189   // A volatile write is translated to the node sequence
1190   //
1191   //   MemBarRelease
1192   //   StoreX[mo_release] {CardMark}-optional
1193   //   MemBarVolatile
1194   //
1195   // n.b. the above node patterns are generated with a strict
1196   // 'signature' configuration of input and output dependencies (see
1197   // the predicates below for exact details). The card mark may be as
1198   // simple as a few extra nodes or, in a few GC configurations, may
1199   // include more complex control flow between the leading and
1200   // trailing memory barriers. However, whatever the card mark
1201   // configuration these signatures are unique to translated volatile
1202   // reads/stores -- they will not appear as a result of any other
1203   // bytecode translation or inlining nor as a consequence of
1204   // optimizing transforms.
1205   //
1206   // We also want to catch inlined unsafe volatile gets and puts and
1207   // be able to implement them using either ldar<x>/stlr<x> or some
1208   // combination of ldr<x>/stlr<x> and dmb instructions.
1209   //
  // Inlined unsafe volatile puts manifest as a minor variant of the
1211   // normal volatile put node sequence containing an extra cpuorder
1212   // membar
1213   //
1214   //   MemBarRelease
1215   //   MemBarCPUOrder
1216   //   StoreX[mo_release] {CardMark}-optional
1217   //   MemBarCPUOrder
1218   //   MemBarVolatile
1219   //
1220   // n.b. as an aside, a cpuorder membar is not itself subject to
1221   // matching and translation by adlc rules.  However, the rule
1222   // predicates need to detect its presence in order to correctly
1223   // select the desired adlc rules.
1224   //
1225   // Inlined unsafe volatile gets manifest as a slightly different
1226   // node sequence to a normal volatile get because of the
1227   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1230   // present
1231   //
1232   //   MemBarCPUOrder
1233   //        ||       \\
1234   //   MemBarCPUOrder LoadX[mo_acquire]
1235   //        ||            |
1236   //        ||       {DecodeN} optional
1237   //        ||       /
1238   //     MemBarAcquire
1239   //
1240   // In this case the acquire membar does not directly depend on the
1241   // load. However, we can be sure that the load is generated from an
1242   // inlined unsafe volatile get if we see it dependent on this unique
1243   // sequence of membar nodes. Similarly, given an acquire membar we
1244   // can know that it was added because of an inlined unsafe volatile
1245   // get if it is fed and feeds a cpuorder membar and if its feed
1246   // membar also feeds an acquiring load.
1247   //
1248   // Finally an inlined (Unsafe) CAS operation is translated to the
1249   // following ideal graph
1250   //
1251   //   MemBarRelease
1252   //   MemBarCPUOrder
1253   //   CompareAndSwapX {CardMark}-optional
1254   //   MemBarCPUOrder
1255   //   MemBarAcquire
1256   //
1257   // So, where we can identify these volatile read and write
1258   // signatures we can choose to plant either of the above two code
1259   // sequences. For a volatile read we can simply plant a normal
1260   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1261   // also choose to inhibit translation of the MemBarAcquire and
1262   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1263   //
1264   // When we recognise a volatile store signature we can choose to
1265   // plant at a dmb ish as a translation for the MemBarRelease, a
1266   // normal str<x> and then a dmb ish for the MemBarVolatile.
1267   // Alternatively, we can inhibit translation of the MemBarRelease
1268   // and MemBarVolatile and instead plant a simple stlr<x>
1269   // instruction.
1270   //
1271   // when we recognise a CAS signature we can choose to plant a dmb
1272   // ish as a translation for the MemBarRelease, the conventional
1273   // macro-instruction sequence for the CompareAndSwap node (which
1274   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1275   // Alternatively, we can elide generation of the dmb instructions
1276   // and plant the alternative CompareAndSwap macro-instruction
1277   // sequence (which uses ldaxr<x>).
1278   //
1279   // Of course, the above only applies when we see these signature
1280   // configurations. We still want to plant dmb instructions in any
1281   // other cases where we may see a MemBarAcquire, MemBarRelease or
1282   // MemBarVolatile. For example, at the end of a constructor which
1283   // writes final/volatile fields we will see a MemBarRelease
1284   // instruction and this needs a 'dmb ish' lest we risk the
1285   // constructed object being visible without making the
1286   // final/volatile field writes visible.
1287   //
1288   // n.b. the translation rules below which rely on detection of the
1289   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1290   // If we see anything other than the signature configurations we
1291   // always just translate the loads and stores to ldr<x> and str<x>
1292   // and translate acquire, release and volatile membars to the
1293   // relevant dmb instructions.
1294   //
1295 
1296   // is_CAS(int opcode, bool maybe_volatile)
1297   //
1298   // return true if opcode is one of the possible CompareAndSwapX
1299   // values otherwise false.
1300 
1301   bool is_CAS(int opcode, bool maybe_volatile)
1302   {
1303     switch(opcode) {
1304       // We handle these
1305     case Op_CompareAndSwapI:
1306     case Op_CompareAndSwapL:
1307     case Op_CompareAndSwapP:
1308     case Op_CompareAndSwapN:
1309     case Op_ShenandoahCompareAndSwapP:
1310     case Op_ShenandoahCompareAndSwapN:
1311     case Op_CompareAndSwapB:
1312     case Op_CompareAndSwapS:
1313     case Op_GetAndSetI:
1314     case Op_GetAndSetL:
1315     case Op_GetAndSetP:
1316     case Op_GetAndSetN:
1317     case Op_GetAndAddI:
1318     case Op_GetAndAddL:
1319       return true;
1320     case Op_CompareAndExchangeI:
1321     case Op_CompareAndExchangeN:
1322     case Op_CompareAndExchangeB:
1323     case Op_CompareAndExchangeS:
1324     case Op_CompareAndExchangeL:
1325     case Op_CompareAndExchangeP:
1326     case Op_WeakCompareAndSwapB:
1327     case Op_WeakCompareAndSwapS:
1328     case Op_WeakCompareAndSwapI:
1329     case Op_WeakCompareAndSwapL:
1330     case Op_WeakCompareAndSwapP:
1331     case Op_WeakCompareAndSwapN:
1332     case Op_ShenandoahWeakCompareAndSwapP:
1333     case Op_ShenandoahWeakCompareAndSwapN:
1334     case Op_ShenandoahCompareAndExchangeP:
1335     case Op_ShenandoahCompareAndExchangeN:
1336       return maybe_volatile;
1337     default:
1338       return false;
1339     }
1340   }
1341 
1342   // helper to determine the maximum number of Phi nodes we may need to
1343   // traverse when searching from a card mark membar for the merge mem
1344   // feeding a trailing membar or vice versa
1345 
// predicates controlling emit of ldr<x>/ldar<x> and associated dmb

// Return true if the acquire membar can be elided because the load it
// trails will itself be emitted as an acquiring access (ldar<x>/ldaxr<x>).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing a volatile load: the load is emitted as ldar<x>.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing a CAS-style load/store: elide only for recognised CAS opcodes,
  // which are emitted with an acquiring ldaxr<x>.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1371 
1372 bool needs_acquiring_load(const Node *n)
1373 {
1374   assert(n->is_Load(), "expecting a load");
1375   if (UseBarriersForVolatile) {
1376     // we use a normal load and a dmb
1377     return false;
1378   }
1379 
1380   LoadNode *ld = n->as_Load();
1381 
1382   return ld->is_acquire();
1383 }
1384 
1385 bool unnecessary_release(const Node *n)
1386 {
1387   assert((n->is_MemBar() &&
1388           n->Opcode() == Op_MemBarRelease),
1389          "expecting a release membar");
1390 
1391   if (UseBarriersForVolatile) {
1392     // we need to plant a dmb
1393     return false;
1394   }
1395 
1396   MemBarNode *barrier = n->as_MemBar();
1397   if (!barrier->leading()) {
1398     return false;
1399   } else {
1400     Node* trailing = barrier->trailing_membar();
1401     MemBarNode* trailing_mb = trailing->as_MemBar();
1402     assert(trailing_mb->trailing(), "Not a trailing membar?");
1403     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1404 
1405     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1406     if (mem->is_Store()) {
1407       assert(mem->as_Store()->is_release(), "");
1408       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1409       return true;
1410     } else {
1411       assert(mem->is_LoadStore(), "");
1412       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1413       return is_CAS(mem->Opcode(), true);
1414     }
1415   }
1416   return false;
1417 }
1418 
// Return true if this MemBarVolatile trails a releasing store and can be
// elided (the store will be emitted as stlr<x>).
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // Cross-check the leading/trailing membar pairing recorded on the graph.
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1442 
1443 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1444 
1445 bool needs_releasing_store(const Node *n)
1446 {
1447   // assert n->is_Store();
1448   if (UseBarriersForVolatile) {
1449     // we use a normal store and dmb combination
1450     return false;
1451   }
1452 
1453   StoreNode *st = n->as_Store();
1454 
1455   return st->trailing_membar() != NULL;
1456 }
1457 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // barrier mode uses explicit dmbs instead of ldaxr<x>
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // strong CAS / get-and-set nodes are always part of a full volatile
    // signature, so the trailing membar must be present
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // weak/exchange variants only need an acquiring load when a trailing
    // membar was actually attached
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1479 
#define __ _masm.

// forward declarations for helper functions to convert register
// indices to register objects

// the ad file has to provide implementations of certain methods
// expected by the generic code
//
// REQUIRED FUNCTIONALITY

//=============================================================================

// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.

int MachCallStaticJavaNode::ret_addr_offset()
{
  // call should be a simple bl
  int off = 4;  // one 4-byte instruction
  return off;
}
1502 
// Dynamic (inline-cache) call: four 4-byte instructions precede the
// return address.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
1507 
1508 int MachCallRuntimeNode::ret_addr_offset() {
1509   // for generated stubs the call will be
1510   //   far_call(addr)
1511   // for real runtime callouts it will be six instructions
1512   // see aarch64_enc_java_to_runtime
1513   //   adr(rscratch2, retaddr)
1514   //   lea(rscratch1, RuntimeAddress(addr)
1515   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1516   //   blr(rscratch1)
1517   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1518   if (cb) {
1519     return MacroAssembler::far_branch_size();
1520   } else {
1521     return 6 * NativeInstruction::instruction_size;
1522   }
1523 }
1524 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1538 
1539 //=============================================================================
1540 
#ifndef PRODUCT
// Pretty-print the breakpoint node for -XX:+PrintOptoAssembly output.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a breakpoint as a single brk instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // computed by the shared framework from the emitted code
  return MachNode::size(ra_);
}
1555 
1556 //=============================================================================
1557 
#ifndef PRODUCT
  // Pretty-print the nop padding for -XX:+PrintOptoAssembly output.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions as alignment padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    C2_MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1574 
1575 //=============================================================================
// The constant table base needs no register on AArch64: constants are
// reached with absolute addressing, so the node emits nothing.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called because requires_postalloc_expand() is false
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1600 
#ifndef PRODUCT
// Pretty-print the prolog for -XX:+PrintOptoAssembly; mirrors the two
// frame-building strategies used by emit() below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: single immediate sub fits the stp offset range
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push the fp/lr pair first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1622 
// Emit the method prolog: patchable nop, optional clinit barrier, stack
// bang, frame build, and constant-table base offset setup. Statement
// order matters (e.g. the nop must be the first instruction).
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // jump to the wrong-method stub unless the holder class is initialized
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1665 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values emitted by the prolog.
int MachPrologNode::reloc() const
{
  return 0;
}
1676 
1677 //=============================================================================
1678 
#ifndef PRODUCT
// Pretty-print the epilog for -XX:+PrintOptoAssembly; mirrors the frame
// teardown strategies and the return-poll emitted by emit() below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: ldp offset fits, then one immediate add
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore sp via rscratch1, then pop the fp/lr pair
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1704 
// Emit the method epilog: frame teardown, optional reserved-stack check,
// and the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // safepoint poll on method return
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
1720 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the default pipeline description for scheduling purposes.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1734 
1735 //=============================================================================
1736 
1737 // Figure out which register class each belongs in: rc_int, rc_float or
1738 // rc_stack.
1739 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1740 
1741 static enum RC rc_class(OptoReg::Name reg) {
1742 
1743   if (reg == OptoReg::Bad) {
1744     return rc_bad;
1745   }
1746 
1747   // we have 30 int registers * 2 halves
1748   // (rscratch1 and rscratch2 are omitted)
1749   int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
1750 
1751   if (reg < slots_of_int_registers) {
1752     return rc_int;
1753   }
1754 
1755   // we have 32 float register * 4 halves
1756   if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
1757     return rc_float;
1758   }
1759 
1760   // Between float regs & stack is the flags regs.
1761   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1762 
1763   return rc_stack;
1764 }
1765 
1766 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1767   Compile* C = ra_->C;
1768 
1769   // Get registers to move.
1770   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1771   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1772   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1773   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1774 
1775   enum RC src_hi_rc = rc_class(src_hi);
1776   enum RC src_lo_rc = rc_class(src_lo);
1777   enum RC dst_hi_rc = rc_class(dst_hi);
1778   enum RC dst_lo_rc = rc_class(dst_lo);
1779 
1780   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1781 
1782   if (src_hi != OptoReg::Bad) {
1783     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1784            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1785            "expected aligned-adjacent pairs");
1786   }
1787 
1788   if (src_lo == dst_lo && src_hi == dst_hi) {
1789     return 0;            // Self copy, no move.
1790   }
1791 
1792   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1793               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1794   int src_offset = ra_->reg2offset(src_lo);
1795   int dst_offset = ra_->reg2offset(dst_lo);
1796 
1797   if (bottom_type()->isa_vect() != NULL) {
1798     uint ireg = ideal_reg();
1799     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1800     if (cbuf) {
1801       C2_MacroAssembler _masm(cbuf);
1802       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1803       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1804         // stack->stack
1805         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1806         if (ireg == Op_VecD) {
1807           __ unspill(rscratch1, true, src_offset);
1808           __ spill(rscratch1, true, dst_offset);
1809         } else {
1810           __ spill_copy128(src_offset, dst_offset);
1811         }
1812       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1813         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1814                ireg == Op_VecD ? __ T8B : __ T16B,
1815                as_FloatRegister(Matcher::_regEncode[src_lo]));
1816       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1817         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1818                        ireg == Op_VecD ? __ D : __ Q,
1819                        ra_->reg2offset(dst_lo));
1820       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1821         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1822                        ireg == Op_VecD ? __ D : __ Q,
1823                        ra_->reg2offset(src_lo));
1824       } else {
1825         ShouldNotReachHere();
1826       }
1827     }
1828   } else if (cbuf) {
1829     C2_MacroAssembler _masm(cbuf);
1830     switch (src_lo_rc) {
1831     case rc_int:
1832       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1833         if (is64) {
1834             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1835                    as_Register(Matcher::_regEncode[src_lo]));
1836         } else {
1837             C2_MacroAssembler _masm(cbuf);
1838             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1839                     as_Register(Matcher::_regEncode[src_lo]));
1840         }
1841       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1842         if (is64) {
1843             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1844                      as_Register(Matcher::_regEncode[src_lo]));
1845         } else {
1846             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1847                      as_Register(Matcher::_regEncode[src_lo]));
1848         }
1849       } else {                    // gpr --> stack spill
1850         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1851         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1852       }
1853       break;
1854     case rc_float:
1855       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1856         if (is64) {
1857             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1858                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1859         } else {
1860             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1861                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1862         }
1863       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1864           if (cbuf) {
1865             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1866                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1867         } else {
1868             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1869                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1870         }
1871       } else {                    // fpr --> stack spill
1872         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1873         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1874                  is64 ? __ D : __ S, dst_offset);
1875       }
1876       break;
1877     case rc_stack:
1878       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1879         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1880       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1881         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1882                    is64 ? __ D : __ S, src_offset);
1883       } else {                    // stack --> stack copy
1884         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1885         __ unspill(rscratch1, is64, src_offset);
1886         __ spill(rscratch1, is64, dst_offset);
1887       }
1888       break;
1889     default:
1890       assert(false, "bad rc_class for spill");
1891       ShouldNotReachHere();
1892     }
1893   }
1894 
1895   if (st) {
1896     st->print("spill ");
1897     if (src_lo_rc == rc_stack) {
1898       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1899     } else {
1900       st->print("%s -> ", Matcher::regName[src_lo]);
1901     }
1902     if (dst_lo_rc == rc_stack) {
1903       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1904     } else {
1905       st->print("%s", Matcher::regName[dst_lo]);
1906     }
1907     if (bottom_type()->isa_vect() != NULL) {
1908       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1909     } else {
1910       st->print("\t# spill size = %d", is64 ? 64:32);
1911     }
1912   }
1913 
1914   return 0;
1915 
1916 }
1917 
#ifndef PRODUCT
// Debug-only pretty printer: run implementation() with a NULL code
// buffer so only its formatting path executes.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

// Emit the spill-copy code into cbuf (no formatting stream).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Variable size; determined from the emitted encoding.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1934 
1935 //=============================================================================
1936 
#ifndef PRODUCT
// Debug-only pretty printer for the box-lock address materialization.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  // NOTE(review): the format string contains an unmatched ']' — cosmetic
  // only; this text is debug output and never reaches emitted code.
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

// Materialize the address of the on-stack lock box: reg = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // A single add works only when the offset fits an add/sub immediate;
  // anything larger is unexpected here and trips the guard below.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;  // a single 4-byte add instruction
}
1963 
1964 //=============================================================================
1965 
#ifndef PRODUCT
// Debug-only pretty printer for the unverified entry point.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   // NOTE(review): this branch prints "# compressed klass" even though it
   // is the uncompressed path — misleading label in debug output only.
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

// Unverified entry point: compare the receiver's klass (loaded via
// rscratch1) against the inline-cache klass in rscratch2 and jump to
// the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Variable size; cmp_klass expansion depends on compressed-klass mode.
  return MachNode::size(ra_);
}
2001 
2002 // REQUIRED EMIT CODE
2003 
2004 //=============================================================================
2005 
// Emit exception handler code.
// Generates a far jump to the exception blob in the stub section and
// returns the handler's offset, or 0 if the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2025 
// Emit deopt handler code.
// Generates a far jump to the deopt blob's unpack entry and returns the
// handler's offset, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Point lr at the instruction that follows it — presumably the deopt
  // blob uses lr to identify the deopt site; confirm against unpack().
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2046 
2047 // REQUIRED MATCHER CODE
2048 
2049 //=============================================================================
2050 
2051 const bool Matcher::match_rule_supported(int opcode) {
2052   if (!has_match_rule(opcode))
2053     return false;
2054 
2055   bool ret_value = true;
2056   switch (opcode) {
2057     case Op_CacheWB:
2058     case Op_CacheWBPreSync:
2059     case Op_CacheWBPostSync:
2060       if (!VM_Version::supports_data_cache_line_flush()) {
2061         ret_value = false;
2062       }
2063       break;
2064   }
2065 
2066   return ret_value; // Per default match rules are supported.
2067 }
2068 
2069 // Identify extra cases that we might want to provide match rules for vector nodes and
2070 // other intrinsics guarded with vector length (vlen) and element type (bt).
2071 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
2072   int bit_size = vlen * type2aelembytes(bt) * 8;
2073   if (bit_size > 128) {
2074     return false;
2075   }
2076 
2077   if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
2078     return false;
2079   }
2080 
2081   // Special cases which require vector length
2082   switch (opcode) {
2083     case Op_MulAddVS2VI: {
2084       if (vlen != 4) {
2085         return false;
2086       }
2087       break;
2088     }
2089   }
2090 
2091   return true; // Per default match rules are supported.
2092 }
2093 
// No predicated (masked) vector operations are advertised here.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the default float register-pressure threshold unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on aarch64.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2107 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Short branches here are those reaching within +/-32K bytes.
  return (-32768 <= offset && offset < 32768);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2128 
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // 16 bytes without SVE, up to 256 with SVE, both capped by MaxVectorSize.
  int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
const int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  // To support vector load mask for long data,  set min size
  // which can be loaded into vector as 2 bytes.
  int size = 2;
  return MIN2(size,max_size);
}
2150 
2151 // Vector ideal reg.
2152 const uint Matcher::vector_ideal_reg(int len) {
2153   switch(len) {
2154     // For 16-bit/32-bit mask vector, reuse VecD.
2155     case  2:
2156     case  4:
2157     case  8: return Op_VecD;
2158     case 16: return Op_VecX;
2159   }
2160   ShouldNotReachHere();
2161   return 0;
2162 }
2163 
2164 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2165   switch(size) {
2166     case  4:
2167     case  8: return Op_VecD;
2168     case 16: return Op_VecX;
2169   }
2170   ShouldNotReachHere();
2171   return 0;
2172 }
2173 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}

// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 variable shifts use the count modulo the datasize, so no
// explicit masking is required.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
2207 
// Unreachable: supports_generic_vector_operands is false on aarch64,
// so none of the three hooks below should ever be called.
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}

bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}

bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2222 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only when compressed oops are unscaled (shift == 0).
  return CompressedOops::shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
2252 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not used on aarch64 (the original "No-op on amd64" comment was wrong:
// the body is Unimplemented(), not a no-op).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2283 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // r0-r7 and v0-v7 (both slot halves) are the Java argument registers.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any register that can carry a Java argument may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2314 
// No assembler fast path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
// (aarch64 has no fused divmod node, so none of these masks are used.)
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register used to preserve SP across method-handle invokes: FP here.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2345 
2346 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2347   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2348     Node* u = addp->fast_out(i);
2349     if (u->is_Mem()) {
2350       int opsize = u->as_Mem()->memory_size();
2351       assert(opsize > 0, "unexpected memory operand size");
2352       if (u->as_Mem()->memory_size() != (1<<shift)) {
2353         return false;
2354       }
2355     }
2356   }
2357   return true;
2358 }
2359 
const bool Matcher::convi2l_type_required = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Returns true when the AddP's offset expression was flagged for
// subsumption into the address operand (and its inputs were pushed for
// visiting); the push order below mirrors the matcher's expectations —
// do not reorder.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL (ConvI2L x)? con) and every memory use
  // can absorb the scaled index.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) — subsume the sign extension.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2402 
// Nothing to do on aarch64.
void Compile::reshape_address(AddPNode* addp) {
}
2405 
2406 
// Emit a volatile access via INSN.  Volatile accesses must address
// through a plain base register, so the guarantees reject any operand
// with an index, displacement or scale.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2415 
2416 
// Convert a matched memory operand (opcode/base/index/size/disp) into an
// assembler Address, selecting sign-extending (sxtw) index scaling for
// the I2L addressing patterns and plain lsl otherwise.
static Address mem2address(int opcode, Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      return Address(base, disp);
    } else {
      // Indexed modes cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      return Address(base, as_Register(index), scale);
    }
  }


// Member-function-pointer types used by the loadStore helpers.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2449 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Integer-register variant: builds the Address via mem2address and
  // legitimizes base+offset forms before issuing 'insn'.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
2471 
  // Float-register variant: computes the extend mode inline (only the
  // scaled-I2L patterns need a sign-extended index here) instead of
  // going through mem2address.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2502 
  // Vector variant: no legitimize_address fix-up path; the offset (or
  // lsl-scaled index) is used as-is.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2514 
2515 %}
2516 
2517 
2518 
2519 //----------ENCODING BLOCK-----------------------------------------------------
2520 // This block specifies the encoding classes used by the compiler to
2521 // output byte streams.  Encoding classes are parameterized macros
2522 // used by Machine Instruction Nodes in order to generate the bit
2523 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
2526 // COND_INTER.  REG_INTER causes an operand to generate a function
2527 // which returns its register number when queried.  CONST_INTER causes
2528 // an operand to generate a function which returns the value of the
2529 // constant when queried.  MEMORY_INTER causes an operand to generate
2530 // four functions which return the Base Register, the Index Register,
2531 // the Scale Value, and the Offset Value of the operand when queried.
2532 // COND_INTER causes an operand to generate six functions which return
2533 // the encoding code (ie - encoding bits for the instruction)
2534 // associated with each basic boolean condition for a conditional
2535 // instruction.
2536 //
2537 // Instructions specify two basic values for encoding.  Again, a
2538 // function is available to check if the constant displacement is an
2539 // oop. They use the ins_encode keyword to specify their encoding
2540 // classes (which must be a sequence of enc_class names, and their
2541 // parameters, specified in the encoding block), and they use the
2542 // opcode keyword to specify, in order, their primary, secondary, and
2543 // tertiary opcode.  Only the opcode sections which a particular
2544 // instruction needs for encoding need to be specified.
2545 encode %{
2546   // Build emit functions for each basic byte or larger field in the
2547   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2548   // from C++ code in the enc_class source block.  Emit functions will
2549   // live in the main source block for now.  In future, we can
2550   // generalize this by adding a syntax that specifies the sizes of
2551   // fields in an order, so that the adlc can build the emit functions
2552   // automagically
2553 
2554   // catch all for unimplemented encodings
2555   enc_class enc_unimplemented %{
2556     C2_MacroAssembler _masm(&cbuf);
2557     __ unimplemented("C2 catch all");
2558   %}
2559 
  // BEGIN Non-volatile memory access
  //
  // Each enc_class below emits one plain (non-acquire/release) load or
  // store through the loadStore() helper.  Arguments, in order: the
  // MacroAssembler member function implementing the instruction, the
  // data register, the ideal memory opcode, the decomposed address
  // ($base, $index, $scale, $disp) and, last, the access size in bytes
  // (1/2/4/8), which must agree with the memoryN operand type.
  // NOTE(review): these stanzas are generated from ad_encode.m4 --
  // change the m4 source rather than this file.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // Byte store preceded by a StoreStore barrier (ordered card mark).
  // NOTE(review): operand type is memory4 while the access size is 1
  // byte (strb) -- presumably intentional to match the card-mark
  // operand; confirm against the instructs that use this encoding.
  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
2798 
  // Vector loads and stores
  //
  // These move data through the SIMD/FP register file.  The
  // MacroAssembler::H/S/D/Q argument selects the access width
  // (2/4/8/16 bytes respectively); the address is passed decomposed,
  // as in the scalar encodings above.
  enc_class aarch64_enc_ldrvH(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 16-byte (quad) vector load uses the full 128-bit vecX register
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2847 
  // volatile loads and stores
  //
  // MOV_VOLATILE is a macro defined in this file's source block (not
  // visible in this chunk); it emits an acquiring load (ldar*) or
  // releasing store (stlr*), using the scratch register to form the
  // address when it is not a plain base register.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // ldarb has no sign-extending form, so acquiring signed-byte loads
  // do a plain ldarb then sign-extend in the destination register.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // As above: acquiring signed-halfword loads use ldarh + sign-extend.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): second enc_class named aarch64_enc_ldarw, with an
  // iRegL destination -- adlc distinguishes the two by operand
  // signature, matching the ldrb/ldrh pattern earlier in the file.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquiring float load: there is no FP ldar, so load into rscratch1
  // with ldarw and then move the bits into the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Releasing float store: move the FP bits to rscratch2 first, then
  // stlrw.  The inner braces scope this _masm -- presumably because
  // MOV_VOLATILE declares its own; confirm against the macro body.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2974 
  // synchronized read/update encodings

  // Load-acquire-exclusive of a 64-bit value.  ldaxr only accepts a
  // plain base register, so any index/offset is first folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + displacement: materialize the address first
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale), formed in two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3005 
  // Store-release-exclusive of a 64-bit value.  Mirrors the ldaxr
  // encoding above: the address is folded into rscratch2 as needed,
  // and stlxr writes its success status (0 = success) to rscratch1.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // set condition flags so EQ means the exclusive store succeeded
    __ cmpw(rscratch1, zr);
  %}
3035 
  // Compare-and-exchange encodings.  All of them delegate to
  // MacroAssembler::cmpxchg with the operand size given by the
  // Assembler size constant (xword/word/halfword/byte); the address
  // must be a bare base register (no index, no displacement), which
  // the guarantee() enforces.  Result is left in the condition flags
  // (see aarch64_enc_cset_eq below); no result register is requested
  // (noreg).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3104 
  // auxiliary used for CompareAndSwapX to set result register:
  // materializes the EQ condition left by the cmpxchg encodings above
  // as 1 (success) or 0 (failure) in res.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3111 
  // prefetch encodings

  // Prefetch for write: emits PRFM PSTL1KEEP (prefetch for store, L1,
  // retain in cache).  Address forms with both index and displacement
  // need a lea into rscratch1 first, as PRFM's addressing is limited.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3132 
  // mov encodings
3134 
  // 32-bit immediate move.  A zero constant is moved from zr --
  // presumably to get the canonical zeroing form; confirm intent.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; same zero special case as above.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3156 
  // Pointer-constant move.  Dispatches on the constant's relocation
  // type: oops go through movoop, metadata through mov_metadata, and
  // plain addresses either as a small immediate (below the first page,
  // so still an "invalid" address) or via adrp+add page addressing.
  // NULL and 1 are handled by dedicated encodings (mov_p0 / mov_p1),
  // so reaching them here is a bug.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3181 
  // Null pointer constant: move from zr.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a sentinel value by callers).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Loads the card-table byte map base address into dst.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow-oop constant: must carry an oop relocation; NULL narrow
  // oops are handled by mov_n0 below.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Null narrow-oop constant: move from zr.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow-klass constant: must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3230 
  // arithmetic encodings

  // Add/subtract immediate, 32-bit.  The instruct's $primary opcode
  // selects the operation (0 = add, 1 = subtract) by negating the
  // constant; a then-negative constant is folded back into the
  // opposite instruction so the immediate stays positive.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Add/subtract immediate, 64-bit; same $primary convention.  The
  // constant is narrowed to int32_t -- presumably immLAddSub
  // guarantees it fits; confirm against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}

  // 32-bit signed division with Java semantics (min_int / -1 etc.)
  // handled by corrected_idivl; final flag false = quotient.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit signed division (corrected_idivq), quotient result.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit signed remainder: same helper, final flag true = remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit signed remainder.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3292 
3293   // compare instruction encodings
3294 
3295   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3296     C2_MacroAssembler _masm(&cbuf);
3297     Register reg1 = as_Register($src1$$reg);
3298     Register reg2 = as_Register($src2$$reg);
3299     __ cmpw(reg1, reg2);
3300   %}
3301 
  // Flags-only 32-bit compare against an add/sub-encodable immediate,
  // implemented as a subtract into the zero register.  A negative
  // immediate is handled with addsw of its magnitude, which sets the
  // same flags as subtracting the original value.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
3312 
  // 32-bit compare against an arbitrary int constant: materialize the
  // constant in rscratch1 with movw, then compare.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;   // u_int32_t: HotSpot legacy spelling of uint32_t
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
3320 
3321   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3322     C2_MacroAssembler _masm(&cbuf);
3323     Register reg1 = as_Register($src1$$reg);
3324     Register reg2 = as_Register($src2$$reg);
3325     __ cmp(reg1, reg2);
3326   %}
3327 
  // Flags-only 64-bit compare against an add/sub-encodable immediate.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // val == -val only for 0 (handled above) and Long.MIN_VALUE, so
      // this branch covers every other negative value.
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
3342 
  // 64-bit compare against an arbitrary long constant: materialize the
  // constant in rscratch1, then compare.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;   // u_int64_t: HotSpot legacy spelling of uint64_t
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
3350 
3351   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3352     C2_MacroAssembler _masm(&cbuf);
3353     Register reg1 = as_Register($src1$$reg);
3354     Register reg2 = as_Register($src2$$reg);
3355     __ cmp(reg1, reg2);
3356   %}
3357 
3358   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3359     C2_MacroAssembler _masm(&cbuf);
3360     Register reg1 = as_Register($src1$$reg);
3361     Register reg2 = as_Register($src2$$reg);
3362     __ cmpw(reg1, reg2);
3363   %}
3364 
3365   enc_class aarch64_enc_testp(iRegP src) %{
3366     C2_MacroAssembler _masm(&cbuf);
3367     Register reg = as_Register($src$$reg);
3368     __ cmp(reg, zr);
3369   %}
3370 
3371   enc_class aarch64_enc_testn(iRegN src) %{
3372     C2_MacroAssembler _masm(&cbuf);
3373     Register reg = as_Register($src$$reg);
3374     __ cmpw(reg, zr);
3375   %}
3376 
3377   enc_class aarch64_enc_b(label lbl) %{
3378     C2_MacroAssembler _masm(&cbuf);
3379     Label *L = $lbl$$label;
3380     __ b(*L);
3381   %}
3382 
  // Conditional branch; the A64 condition code is carried in the cmpOp
  // operand's cmpcode field.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned variant: the body is identical to aarch64_enc_br_con —
  // presumably the signed/unsigned distinction is already folded into
  // the cmpcode by the cmpOpU operand; verify against its interface.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3394 
  // Slow-path subtype check via MacroAssembler::check_klass_subtype_slow_path.
  // The helper falls through on success and branches to the local miss
  // label on failure; with primary set, the result register is cleared
  // on the success path before miss is bound.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       // success path: report 0 in the result register
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3412 
  // Emit a static (or optimized-virtual) Java call, or a call to a
  // runtime wrapper when there is no resolved _method.  Any failure to
  // allocate a stub means the code cache is full, and the compilation
  // is bailed out via ciEnv::record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    // trampoline_call returns NULL when it could not allocate its stub
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3439 
  // Emit a virtual Java call through an inline cache.  ic_call returns
  // NULL when it cannot allocate in the code cache, in which case the
  // compilation is bailed out.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3449 
  // Post-call epilog.  The VerifyStackAtCalls check is not implemented
  // on AArch64: call_Unimplemented() is a placeholder.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3457 
  // Call from compiled Java code to a runtime entry (e.g. arraycopy
  // stubs that C2 schedules as runtime calls).
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        // No room for the trampoline stub: bail out the compilation.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target outside the code cache: indirect call through rscratch1.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3484 
  // Rethrow an exception: jump to the shared rethrow stub (far_jump,
  // since the stub may be beyond direct branch range).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Plain method return: branch to the address in the link register.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3494 
3495   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3496     C2_MacroAssembler _masm(&cbuf);
3497     Register target_reg = as_Register($jump_target$$reg);
3498     __ br(target_reg);
3499   %}
3500 
  // Tail jump used on exception paths: hand the popped return address
  // to the callee in r3, then jump to jump_target.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3510 
  // Fast-path monitor enter for C2 lock nodes.  On exit the condition
  // flags encode the outcome: EQ => lock acquired, NE => the caller
  // must take the slow path (see the trailing comments).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; we will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3588 
  // Fast-path monitor exit, the counterpart of aarch64_enc_fast_lock.
  // On exit the condition flags encode the outcome: EQ => unlocked,
  // NE => the caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3649 
3650 %}
3651 
3652 //----------FRAME--------------------------------------------------------------
3653 // Definition of frame structure and management information.
3654 //
3655 //  S T A C K   L A Y O U T    Allocators stack-slot number
3656 //                             |   (to get allocators register number
3657 //  G  Owned by    |        |  v    add OptoReg::stack0())
3658 //  r   CALLER     |        |
3659 //  o     |        +--------+      pad to even-align allocators stack-slot
3660 //  w     V        |  pad0  |        numbers; owned by CALLER
3661 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3662 //  h     ^        |   in   |  5
3663 //        |        |  args  |  4   Holes in incoming args owned by SELF
3664 //  |     |        |        |  3
3665 //  |     |        +--------+
3666 //  V     |        | old out|      Empty on Intel, window on Sparc
3667 //        |    old |preserve|      Must be even aligned.
3668 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3669 //        |        |   in   |  3   area for Intel ret address
3670 //     Owned by    |preserve|      Empty on Sparc.
3671 //       SELF      +--------+
3672 //        |        |  pad2  |  2   pad to align old SP
3673 //        |        +--------+  1
3674 //        |        | locks  |  0
3675 //        |        +--------+----> OptoReg::stack0(), even aligned
3676 //        |        |  pad1  | 11   pad to align new SP
3677 //        |        +--------+
3678 //        |        |        | 10
3679 //        |        | spills |  9   spills
3680 //        V        |        |  8   (pad0 slot for callee)
3681 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3682 //        ^        |  out   |  7
3683 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3684 //     Owned by    +--------+
3685 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3686 //        |    new |preserve|      Must be even-aligned.
3687 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3688 //        |        |        |
3689 //
3690 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3691 //         known from SELF's arguments and the Java calling convention.
3692 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3700 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3701 //         even aligned with pad0 as needed.
3702 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3703 //           (the latter is true on Intel but is it false on AArch64?)
3704 //         region 6-11 is even aligned; it may be padded out more so that
3705 //         the region from SP to FP meets the minimum stack alignment.
3706 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3707 //         alignment.  Region 11, pad1, may be dynamically extended so that
3708 //         SP meets the minimum alignment.
3709 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal register
    // type: integral values in R0, floating-point values in V0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad for values that occupy a
    // single 32-bit slot.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3813 
3814 //----------ATTRIBUTES---------------------------------------------------------
3815 //----------Operand Attributes-------------------------------------------------
// These attributes are required by the ADLC for every operand and
// instruction; the values here are the defaults.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3831 
3832 //----------OPERANDS-----------------------------------------------------------
3833 // Operand definitions must precede instruction definitions for correct parsing
3834 // in the ADLC because operands constitute user defined types which are used in
3835 // instruction definitions.
3836 
3837 //----------Simple Operands----------------------------------------------------
3838 
3839 // Integer operands 32 bit
3840 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: no lower bound is imposed)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3903 
// the int constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 2
operand immI_2()
%{
  predicate(n->get_int() == 2);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 4
operand immI_4()
%{
  predicate(n->get_int() == 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the int constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4033 
// the long constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the long constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// the long constant 4294967295 (0xFFFFFFFF, unsigned-int mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// a non-zero long of the form 2^k - 1 (contiguous low-order bit mask)
// whose top two bits are clear
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// a non-zero int of the form 2^k - 1 (contiguous low-order bit mask)
// whose top two bits are clear
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4087 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long flavour of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4141 
// Offset for scaled or unscaled immediate loads and stores
// NOTE(review): the second argument to offset_ok_for_immed appears to
// be log2(access size) -- confirm against Address::offset_ok_for_immed.
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset usable for a 1-byte access (same predicate as immIOffset,
// kept as a distinct operand)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset usable for a 2-byte access
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset usable for a 4-byte access
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset usable for an 8-byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset usable for a 16-byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset usable for a 1-byte access (same predicate as immLoffset,
// kept as a distinct operand)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset usable for a 2-byte access
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset usable for a 4-byte access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset usable for an 8-byte access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset usable for a 16-byte access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4262 
// 32 bit integer valid for add sub immediate
// Widened to long before the check so the sign is preserved.
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4284 
// Integer operands 64 bit
// 64 bit immediate -- any long constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// Matches only the constant equal to the byte offset of
// JavaFrameAnchor::last_Java_pc within JavaThread.
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4371 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches only when the constant equals the current card table's
// byte_map_base, and only under a CardTableBarrierSet.
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4442 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// Bit-pattern comparison, so this matches +0.0 but not -0.0.
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value representable as a packed FP immediate
// (per Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// Bit-pattern comparison, so this matches +0.0f but not -0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value representable as a packed FP immediate
// (checked after widening to double).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4503 
// Narrow pointer operands
// Narrow Pointer Immediate -- any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate (compressed class pointer constant)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4534 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): no op_cost(0) here, unlike the sibling operands --
// confirm this omission is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
4578 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The iRegP_Rn operands below pin a pointer value to one specific
// general register, for rules that need a fixed register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4695 
// Fixed-register long operands: pin a long value to one specific
// general register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register 32-bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4796 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer (32 bit) Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4856 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4900 
// Fixed-register double operands: one operand per SIMD/FP register
// V0..V31, each pinning a RegD value to that single register
// (presumably for rules/stubs that require specific V registers --
// confirm against users).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5188 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5228 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method oop register -- same register class as the
// inline cache register.
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5270 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER blocks below, index(0xffffffff) encodes
// "no index register".

// Simple register-indirect addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32-bit index, scaled: [reg, ireg sxtw scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus 64-bit index, scaled: [reg, lreg lsl scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32-bit index, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus 64-bit index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5344 
// Base plus immediate int offset, one operand per access size
// (the off operand class encodes the size constraint).
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5428 
// Base plus immediate long offset, one operand per access size
// (long-constant counterparts of the indOffI* operands above).
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5512 
5513 operand indirectN(iRegN reg)
5514 %{
5515   predicate(CompressedOops::shift() == 0);
5516   constraint(ALLOC_IN_RC(ptr_reg));
5517   match(DecodeN reg);
5518   op_cost(0);
5519   format %{ "[$reg]\t# narrow" %}
5520   interface(MEMORY_INTER) %{
5521     base($reg);
5522     index(0xffffffff);
5523     scale(0x0);
5524     disp(0x0);
5525   %}
5526 %}
5527 
5528 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
5529 %{
5530   predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
5531   constraint(ALLOC_IN_RC(ptr_reg));
5532   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
5533   op_cost(0);
5534   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
5535   interface(MEMORY_INTER) %{
5536     base($reg);
5537     index($ireg);
5538     scale($scale);
5539     disp(0x0);
5540   %}
5541 %}
5542 
5543 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
5544 %{
5545   predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
5546   constraint(ALLOC_IN_RC(ptr_reg));
5547   match(AddP (DecodeN reg) (LShiftL lreg scale));
5548   op_cost(0);
5549   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
5550   interface(MEMORY_INTER) %{
5551     base($reg);
5552     index($lreg);
5553     scale($scale);
5554     disp(0x0);
5555   %}
5556 %}
5557 
5558 operand indIndexI2LN(iRegN reg, iRegI ireg)
5559 %{
5560   predicate(CompressedOops::shift() == 0);
5561   constraint(ALLOC_IN_RC(ptr_reg));
5562   match(AddP (DecodeN reg) (ConvI2L ireg));
5563   op_cost(0);
5564   format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
5565   interface(MEMORY_INTER) %{
5566     base($reg);
5567     index($ireg);
5568     scale(0x0);
5569     disp(0x0);
5570   %}
5571 %}
5572 
5573 operand indIndexN(iRegN reg, iRegL lreg)
5574 %{
5575   predicate(CompressedOops::shift() == 0);
5576   constraint(ALLOC_IN_RC(ptr_reg));
5577   match(AddP (DecodeN reg) lreg);
5578   op_cost(0);
5579   format %{ "$reg, $lreg\t# narrow" %}
5580   interface(MEMORY_INTER) %{
5581     base($reg);
5582     index($lreg);
5583     scale(0x0);
5584     disp(0x0);
5585   %}
5586 %}
5587 
// Memory operand: base = narrow-oop register plus an int immediate offset.
// index = 0xffffffff marks "no index register" to the matcher.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5602 
// Memory operand: base = narrow-oop register plus a long immediate offset.
// Same shape as indOffIN but for a long-typed displacement.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5617 
5618 
5619 
5620 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// Memory operand addressing the pc slot of the JavaThread's frame anchor.
// No predicate: immL_pc_off presumably restricts the offset to exactly the
// anchor pc-slot displacement — confirm against its definition.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5634 
5635 //----------Special Memory Operands--------------------------------------------
5636 // Stack Slot Operand - This operand is used for loading and storing temporary
5637 //                      values on the stack where a match requires a value to
5638 //                      flow through memory.
// Pointer-sized stack-slot operand: an SP-relative access whose
// displacement is the slot's stack offset.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5653 
// Int stack-slot operand (SP-relative, slot offset as displacement).
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5667 
// Float stack-slot operand (SP-relative, slot offset as displacement).
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5681 
// Double stack-slot operand (SP-relative, slot offset as displacement).
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5695 
// Long stack-slot operand (SP-relative, slot offset as displacement).
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5709 
5710 // Operands for expressing Control Flow
5711 // NOTE: Label is a predefined operand which should not be redefined in
5712 //       the AD file. It is generically handled within the ADLC.
5713 
5714 //----------Conditional Branch Operands----------------------------------------
5715 // Comparison Op  - This is the operation of the comparison, and is limited to
5716 //                  the following set of codes:
5717 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5718 //
5719 // Other attributes of the comparison, such as unsignedness, are specified
5720 // by the comparison instruction that sets a condition code flags register.
5721 // That result is represented by a flags operand whose subtype is appropriate
5722 // to the unsignedness (etc.) of the comparison.
5723 //
5724 // Later, the instruction which matches both the Comparison Op (a Bool) and
5725 // the flags (produced by the Cmp) specifies the coding of the comparison op
5726 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5727 
5728 // used for signed integral comparisons and fp comparisons
5729 
// Condition operand for signed integral and fp comparisons.  Each entry
// pairs the AArch64 condition-code encoding with its assembler mnemonic
// (eq=0b0000, ne=0b0001, lt=0b1011, ge=0b1010, le=0b1101, gt=0b1100,
// vs=0b0110, vc=0b0111).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5746 
5747 // used for unsigned integral comparisons
5748 
// Condition operand for unsigned integral comparisons: the ordered
// relations map to the unsigned condition codes lo/hs/ls/hi.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5765 
5766 // used for certain integral comparisons which can be
5767 // converted to cbxx or tbxx instructions
5768 
// Condition operand restricted (by predicate) to eq/ne tests, i.e. the
// comparisons that can be lowered to cbz/cbnz or tbz/tbnz.  The full
// encoding table is still supplied, as required by COND_INTER.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5788 
5789 // used for certain integral comparisons which can be
5790 // converted to cbxx or tbxx instructions
5791 
// Condition operand restricted (by predicate) to lt/ge tests — candidates
// for sign-bit test-and-branch (tbz/tbnz) lowering.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5812 
5813 // used for certain unsigned integral comparisons which can be
5814 // converted to cbxx or tbxx instructions
5815 
// Condition operand restricted (by predicate) to eq/ne/lt/ge tests for
// unsigned comparisons that can become cbxx/tbxx instructions.
operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5838 
5839 // Special operand allowing long args to int ops to be truncated for free
5840 
// Register operand that matches a (ConvL2I reg) subtree at zero cost,
// letting 32-bit instructions consume the low half of a long register
// without an explicit truncating move.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5851 
// Vector memory opclasses: addressing modes legal for 4-, 8- and 16-byte
// vector accesses (register-indirect, reg+reg index, or size-checked
// immediate offsets).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5855 
5856 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
5858 // instruction definitions by not requiring the AD writer to specify
5859 // separate instructions for every form of operand when the
5860 // instruction accepts multiple operand types with the same basic
5861 // encoding and format. The classic case of this is memory operands.
5862 
5863 // memory is used to define read/write location for load/store
5864 // instruction defs. we can turn a memory op into an Address
5865 
// Scalar memory opclasses, one per access size (1/2/4/8 bytes), each
// grouping the addressing modes whose immediate offsets are legal for
// that size.  The narrow-oop immediate-offset forms (indOffIN/indOffLN)
// only appear in the 4- and 8-byte classes.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
5882 
5883 
5884 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5885 // operations. it allows the src to be either an iRegI or a (ConvL2I
5886 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5887 // can be elided because the 32-bit instruction will just employ the
5888 // lower 32 bits anyway.
5889 //
5890 // n.b. this does not elide all L2I conversions. if the truncated
5891 // value is consumed by more than one operation then the ConvL2I
5892 // cannot be bundled into the consuming nodes so an l2i gets planted
5893 // (actually a movw $dst $src) and the downstream instructions consume
5894 // the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant, but it's not too costly.
5896 
5897 opclass iRegIorL2I(iRegI, iRegL2I);
5898 
5899 //----------PIPELINE-----------------------------------------------------------
5900 // Rules which define the behavior of the target architectures pipeline.
5901 
5902 // For specific pipelines, eg A53, define the stages of that pipeline
5903 //pipe_desc(ISS, EX1, EX2, WR);
// Map the symbolic A53-style stage names used by the pipe classes
// (issue, execute 1/2, writeback) onto the generic pipe_desc stages.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5908 
// Pipeline model (latencies styled after an in-order dual-issue core, cf. A53)
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}

// We don't use an actual pipeline model so don't care about resources
// or description. We do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 model the two issue slots of a dual-issue core; INS01 means
// "either slot".  A class that names INS0 (or INS1) alone can only issue
// in that specific slot.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5945 
5946 //----------PIPELINE CLASSES---------------------------------------------------
5947 // Pipeline Classes describe the stages in which input and output are
5948 // referenced by the hardware pipeline.
5949 
// FP dyadic (two-source) op, single precision: result after the full
// 5-stage FP/NEON latency; may issue in either slot.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> int conversion (result in a general register).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> float conversion (source in a general register).
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> float conversion.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> double conversion.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> double conversion.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision: issue slot 0 only.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision: issue slot 0 only.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags plus both
// sources early, shorter (S3) latency than arithmetic.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6151 
// Vector pipe classes.  Convention throughout: the 64-bit (vecD) form may
// issue in either slot (INS01) while the 128-bit (vecX) form is usually
// restricted to slot 0 (INS0).

// Vector multiply, 64-bit.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is both read (accumulator) and
// written.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op, 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (shift amount needs no register read).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP dyadic op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit: slot 0 only even for the 64-bit form.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into vector lanes, 64-bit.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into vector lanes, 128-bit.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into vector lanes, 64-bit.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into vector lanes, 128-bit.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into vector lanes, 128-bit.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate (MOVI), 128-bit.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 128-bit.
// NOTE(review): src is declared vecD although the vmem16 operand implies a
// 128-bit (vecX) value — pipe_class parameter types are only scheduling
// metadata, but this looks like a copy-paste slip; confirm before changing.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6435 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6533 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// Eg.  CSEL    x0, x1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6598 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6677 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// (dst here is the address register, src the stored value)
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6745 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
6805 
6806 // Default pipeline class.
6807 pipe_class pipe_class_default()
6808 %{
6809   single_instruction;
6810   fixed_latency(2);
6811 %}
6812 
6813 // Pipeline class for compares.
6814 pipe_class pipe_class_compare()
6815 %{
6816   single_instruction;
6817   fixed_latency(16);
6818 %}
6819 
6820 // Pipeline class for memory operations.
6821 pipe_class pipe_class_memory()
6822 %{
6823   single_instruction;
6824   fixed_latency(16);
6825 %}
6826 
6827 // Pipeline class for call.
6828 pipe_class pipe_class_call()
6829 %{
6830   single_instruction;
6831   fixed_latency(100);
6832 %}
6833 
6834 // Define the class for the Nop node.
6835 define %{
6836    MachNop = pipe_class_empty;
6837 %}
6838 
6839 %}
6840 //----------INSTRUCTIONS-------------------------------------------------------
6841 //
6842 // match      -- States which machine-independent subtree may be replaced
6843 //               by this instruction.
6844 // ins_cost   -- The estimated cost of this instruction is used by instruction
6845 //               selection to identify a minimum cost tree of machine
6846 //               instructions that matches a tree of machine-independent
6847 //               instructions.
6848 // format     -- A string providing the disassembly for this instruction.
6849 //               The value of an instruction's operand may be inserted
6850 //               by referring to it with a '$' prefix.
6851 // opcode     -- Three instruction opcodes may be provided.  These are referred
6852 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6854 //               indicate the type of machine instruction, while secondary
6855 //               and tertiary are often used for prefix options or addressing
6856 //               modes.
6857 // ins_encode -- A list of encode classes with parameters. The encode class
6858 //               name must have been defined in an 'enc_class' specification
6859 //               in the encode section of the architecture description.
6860 
6861 // ============================================================================
6862 // Memory (Load/Store) Instructions
6863 
6864 // Load Instructions
6865 
6866 // Load Byte (8 bit signed)
6867 instruct loadB(iRegINoSp dst, memory1 mem)
6868 %{
6869   match(Set dst (LoadB mem));
6870   predicate(!needs_acquiring_load(n));
6871 
6872   ins_cost(4 * INSN_COST);
6873   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6874 
6875   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6876 
6877   ins_pipe(iload_reg_mem);
6878 %}
6879 
6880 // Load Byte (8 bit signed) into long
6881 instruct loadB2L(iRegLNoSp dst, memory1 mem)
6882 %{
6883   match(Set dst (ConvI2L (LoadB mem)));
6884   predicate(!needs_acquiring_load(n->in(1)));
6885 
6886   ins_cost(4 * INSN_COST);
6887   format %{ "ldrsb  $dst, $mem\t# byte" %}
6888 
6889   ins_encode(aarch64_enc_ldrsb(dst, mem));
6890 
6891   ins_pipe(iload_reg_mem);
6892 %}
6893 
6894 // Load Byte (8 bit unsigned)
6895 instruct loadUB(iRegINoSp dst, memory1 mem)
6896 %{
6897   match(Set dst (LoadUB mem));
6898   predicate(!needs_acquiring_load(n));
6899 
6900   ins_cost(4 * INSN_COST);
6901   format %{ "ldrbw  $dst, $mem\t# byte" %}
6902 
6903   ins_encode(aarch64_enc_ldrb(dst, mem));
6904 
6905   ins_pipe(iload_reg_mem);
6906 %}
6907 
6908 // Load Byte (8 bit unsigned) into long
6909 instruct loadUB2L(iRegLNoSp dst, memory1 mem)
6910 %{
6911   match(Set dst (ConvI2L (LoadUB mem)));
6912   predicate(!needs_acquiring_load(n->in(1)));
6913 
6914   ins_cost(4 * INSN_COST);
6915   format %{ "ldrb  $dst, $mem\t# byte" %}
6916 
6917   ins_encode(aarch64_enc_ldrb(dst, mem));
6918 
6919   ins_pipe(iload_reg_mem);
6920 %}
6921 
6922 // Load Short (16 bit signed)
6923 instruct loadS(iRegINoSp dst, memory2 mem)
6924 %{
6925   match(Set dst (LoadS mem));
6926   predicate(!needs_acquiring_load(n));
6927 
6928   ins_cost(4 * INSN_COST);
6929   format %{ "ldrshw  $dst, $mem\t# short" %}
6930 
6931   ins_encode(aarch64_enc_ldrshw(dst, mem));
6932 
6933   ins_pipe(iload_reg_mem);
6934 %}
6935 
6936 // Load Short (16 bit signed) into long
6937 instruct loadS2L(iRegLNoSp dst, memory2 mem)
6938 %{
6939   match(Set dst (ConvI2L (LoadS mem)));
6940   predicate(!needs_acquiring_load(n->in(1)));
6941 
6942   ins_cost(4 * INSN_COST);
6943   format %{ "ldrsh  $dst, $mem\t# short" %}
6944 
6945   ins_encode(aarch64_enc_ldrsh(dst, mem));
6946 
6947   ins_pipe(iload_reg_mem);
6948 %}
6949 
6950 // Load Char (16 bit unsigned)
6951 instruct loadUS(iRegINoSp dst, memory2 mem)
6952 %{
6953   match(Set dst (LoadUS mem));
6954   predicate(!needs_acquiring_load(n));
6955 
6956   ins_cost(4 * INSN_COST);
6957   format %{ "ldrh  $dst, $mem\t# short" %}
6958 
6959   ins_encode(aarch64_enc_ldrh(dst, mem));
6960 
6961   ins_pipe(iload_reg_mem);
6962 %}
6963 
6964 // Load Short/Char (16 bit unsigned) into long
6965 instruct loadUS2L(iRegLNoSp dst, memory2 mem)
6966 %{
6967   match(Set dst (ConvI2L (LoadUS mem)));
6968   predicate(!needs_acquiring_load(n->in(1)));
6969 
6970   ins_cost(4 * INSN_COST);
6971   format %{ "ldrh  $dst, $mem\t# short" %}
6972 
6973   ins_encode(aarch64_enc_ldrh(dst, mem));
6974 
6975   ins_pipe(iload_reg_mem);
6976 %}
6977 
6978 // Load Integer (32 bit signed)
6979 instruct loadI(iRegINoSp dst, memory4 mem)
6980 %{
6981   match(Set dst (LoadI mem));
6982   predicate(!needs_acquiring_load(n));
6983 
6984   ins_cost(4 * INSN_COST);
6985   format %{ "ldrw  $dst, $mem\t# int" %}
6986 
6987   ins_encode(aarch64_enc_ldrw(dst, mem));
6988 
6989   ins_pipe(iload_reg_mem);
6990 %}
6991 
6992 // Load Integer (32 bit signed) into long
6993 instruct loadI2L(iRegLNoSp dst, memory4 mem)
6994 %{
6995   match(Set dst (ConvI2L (LoadI mem)));
6996   predicate(!needs_acquiring_load(n->in(1)));
6997 
6998   ins_cost(4 * INSN_COST);
6999   format %{ "ldrsw  $dst, $mem\t# int" %}
7000 
7001   ins_encode(aarch64_enc_ldrsw(dst, mem));
7002 
7003   ins_pipe(iload_reg_mem);
7004 %}
7005 
7006 // Load Integer (32 bit unsigned) into long
7007 instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
7008 %{
7009   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7010   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7011 
7012   ins_cost(4 * INSN_COST);
7013   format %{ "ldrw  $dst, $mem\t# int" %}
7014 
7015   ins_encode(aarch64_enc_ldrw(dst, mem));
7016 
7017   ins_pipe(iload_reg_mem);
7018 %}
7019 
7020 // Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed: disassembly annotation previously said "# int" for a 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7033 
7034 // Load Range
7035 instruct loadRange(iRegINoSp dst, memory4 mem)
7036 %{
7037   match(Set dst (LoadRange mem));
7038 
7039   ins_cost(4 * INSN_COST);
7040   format %{ "ldrw  $dst, $mem\t# range" %}
7041 
7042   ins_encode(aarch64_enc_ldrw(dst, mem));
7043 
7044   ins_pipe(iload_reg_mem);
7045 %}
7046 
7047 // Load Pointer
7048 instruct loadP(iRegPNoSp dst, memory8 mem)
7049 %{
7050   match(Set dst (LoadP mem));
7051   predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
7052 
7053   ins_cost(4 * INSN_COST);
7054   format %{ "ldr  $dst, $mem\t# ptr" %}
7055 
7056   ins_encode(aarch64_enc_ldr(dst, mem));
7057 
7058   ins_pipe(iload_reg_mem);
7059 %}
7060 
7061 // Load Compressed Pointer
7062 instruct loadN(iRegNNoSp dst, memory4 mem)
7063 %{
7064   match(Set dst (LoadN mem));
7065   predicate(!needs_acquiring_load(n));
7066 
7067   ins_cost(4 * INSN_COST);
7068   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7069 
7070   ins_encode(aarch64_enc_ldrw(dst, mem));
7071 
7072   ins_pipe(iload_reg_mem);
7073 %}
7074 
7075 // Load Klass Pointer
7076 instruct loadKlass(iRegPNoSp dst, memory8 mem)
7077 %{
7078   match(Set dst (LoadKlass mem));
7079   predicate(!needs_acquiring_load(n));
7080 
7081   ins_cost(4 * INSN_COST);
7082   format %{ "ldr  $dst, $mem\t# class" %}
7083 
7084   ins_encode(aarch64_enc_ldr(dst, mem));
7085 
7086   ins_pipe(iload_reg_mem);
7087 %}
7088 
7089 // Load Narrow Klass Pointer
7090 instruct loadNKlass(iRegNNoSp dst, memory4 mem)
7091 %{
7092   match(Set dst (LoadNKlass mem));
7093   predicate(!needs_acquiring_load(n));
7094 
7095   ins_cost(4 * INSN_COST);
7096   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7097 
7098   ins_encode(aarch64_enc_ldrw(dst, mem));
7099 
7100   ins_pipe(iload_reg_mem);
7101 %}
7102 
7103 // Load Float
7104 instruct loadF(vRegF dst, memory4 mem)
7105 %{
7106   match(Set dst (LoadF mem));
7107   predicate(!needs_acquiring_load(n));
7108 
7109   ins_cost(4 * INSN_COST);
7110   format %{ "ldrs  $dst, $mem\t# float" %}
7111 
7112   ins_encode( aarch64_enc_ldrs(dst, mem) );
7113 
7114   ins_pipe(pipe_class_memory);
7115 %}
7116 
7117 // Load Double
7118 instruct loadD(vRegD dst, memory8 mem)
7119 %{
7120   match(Set dst (LoadD mem));
7121   predicate(!needs_acquiring_load(n));
7122 
7123   ins_cost(4 * INSN_COST);
7124   format %{ "ldrd  $dst, $mem\t# double" %}
7125 
7126   ins_encode( aarch64_enc_ldrd(dst, mem) );
7127 
7128   ins_pipe(pipe_class_memory);
7129 %}
7130 
7131 
7132 // Load Int Constant
7133 instruct loadConI(iRegINoSp dst, immI src)
7134 %{
7135   match(Set dst src);
7136 
7137   ins_cost(INSN_COST);
7138   format %{ "mov $dst, $src\t# int" %}
7139 
7140   ins_encode( aarch64_enc_movw_imm(dst, src) );
7141 
7142   ins_pipe(ialu_imm);
7143 %}
7144 
7145 // Load Long Constant
7146 instruct loadConL(iRegLNoSp dst, immL src)
7147 %{
7148   match(Set dst src);
7149 
7150   ins_cost(INSN_COST);
7151   format %{ "mov $dst, $src\t# long" %}
7152 
7153   ins_encode( aarch64_enc_mov_imm(dst, src) );
7154 
7155   ins_pipe(ialu_imm);
7156 %}
7157 
7158 // Load Pointer Constant
7159 
7160 instruct loadConP(iRegPNoSp dst, immP con)
7161 %{
7162   match(Set dst con);
7163 
7164   ins_cost(INSN_COST * 4);
7165   format %{
7166     "mov  $dst, $con\t# ptr\n\t"
7167   %}
7168 
7169   ins_encode(aarch64_enc_mov_p(dst, con));
7170 
7171   ins_pipe(ialu_imm);
7172 %}
7173 
7174 // Load Null Pointer Constant
7175 
7176 instruct loadConP0(iRegPNoSp dst, immP0 con)
7177 %{
7178   match(Set dst con);
7179 
7180   ins_cost(INSN_COST);
7181   format %{ "mov  $dst, $con\t# NULL ptr" %}
7182 
7183   ins_encode(aarch64_enc_mov_p0(dst, con));
7184 
7185   ins_pipe(ialu_imm);
7186 %}
7187 
7188 // Load Pointer Constant One
7189 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed: annotation previously said "# NULL ptr" (copy-paste from
  // loadConP0); this instruction materializes the constant pointer 1.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7201 
7202 // Load Byte Map Base Constant
7203 
7204 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7205 %{
7206   match(Set dst con);
7207 
7208   ins_cost(INSN_COST);
7209   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7210 
7211   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7212 
7213   ins_pipe(ialu_imm);
7214 %}
7215 
7216 // Load Narrow Pointer Constant
7217 
7218 instruct loadConN(iRegNNoSp dst, immN con)
7219 %{
7220   match(Set dst con);
7221 
7222   ins_cost(INSN_COST * 4);
7223   format %{ "mov  $dst, $con\t# compressed ptr" %}
7224 
7225   ins_encode(aarch64_enc_mov_n(dst, con));
7226 
7227   ins_pipe(ialu_imm);
7228 %}
7229 
7230 // Load Narrow Null Pointer Constant
7231 
7232 instruct loadConN0(iRegNNoSp dst, immN0 con)
7233 %{
7234   match(Set dst con);
7235 
7236   ins_cost(INSN_COST);
7237   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7238 
7239   ins_encode(aarch64_enc_mov_n0(dst, con));
7240 
7241   ins_pipe(ialu_imm);
7242 %}
7243 
7244 // Load Narrow Klass Constant
7245 
7246 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7247 %{
7248   match(Set dst con);
7249 
7250   ins_cost(INSN_COST);
7251   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7252 
7253   ins_encode(aarch64_enc_mov_nk(dst, con));
7254 
7255   ins_pipe(ialu_imm);
7256 %}
7257 
7258 // Load Packed Float Constant
7259 
7260 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7261   match(Set dst con);
7262   ins_cost(INSN_COST * 4);
7263   format %{ "fmovs  $dst, $con"%}
7264   ins_encode %{
7265     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7266   %}
7267 
7268   ins_pipe(fp_imm_s);
7269 %}
7270 
7271 // Load Float Constant
7272 
7273 instruct loadConF(vRegF dst, immF con) %{
7274   match(Set dst con);
7275 
7276   ins_cost(INSN_COST * 4);
7277 
7278   format %{
7279     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7280   %}
7281 
7282   ins_encode %{
7283     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7284   %}
7285 
7286   ins_pipe(fp_load_constant_s);
7287 %}
7288 
7289 // Load Packed Double Constant
7290 
7291 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7292   match(Set dst con);
7293   ins_cost(INSN_COST);
7294   format %{ "fmovd  $dst, $con"%}
7295   ins_encode %{
7296     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7297   %}
7298 
7299   ins_pipe(fp_imm_d);
7300 %}
7301 
7302 // Load Double Constant
7303 
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed: annotation previously said "float=$con" (copy-paste from
  // loadConF); this loads a double from the constant table.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7318 
7319 // Store Instructions
7320 
7321 // Store CMS card-mark Immediate
7322 instruct storeimmCM0(immI0 zero, memory1 mem)
7323 %{
7324   match(Set mem (StoreCM mem zero));
7325 
7326   ins_cost(INSN_COST);
7327   format %{ "storestore (elided)\n\t"
7328             "strb zr, $mem\t# byte" %}
7329 
7330   ins_encode(aarch64_enc_strb0(mem));
7331 
7332   ins_pipe(istore_mem);
7333 %}
7334 
7335 // Store CMS card-mark Immediate with intervening StoreStore
7336 // needed when using CMS with no conditional card marking
7337 instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
7338 %{
7339   match(Set mem (StoreCM mem zero));
7340 
7341   ins_cost(INSN_COST * 2);
7342   format %{ "storestore\n\t"
7343             "dmb ishst"
7344             "\n\tstrb zr, $mem\t# byte" %}
7345 
7346   ins_encode(aarch64_enc_strb0_ordered(mem));
7347 
7348   ins_pipe(istore_mem);
7349 %}
7350 
7351 // Store Byte
7352 instruct storeB(iRegIorL2I src, memory1 mem)
7353 %{
7354   match(Set mem (StoreB mem src));
7355   predicate(!needs_releasing_store(n));
7356 
7357   ins_cost(INSN_COST);
7358   format %{ "strb  $src, $mem\t# byte" %}
7359 
7360   ins_encode(aarch64_enc_strb(src, mem));
7361 
7362   ins_pipe(istore_reg_mem);
7363 %}
7364 
7365 
instruct storeimmB0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: annotation previously read "strb rscractch2" (typo); the
  // aarch64_enc_strb0 encoding emits a store of the zero register.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7378 
7379 // Store Char/Short
7380 instruct storeC(iRegIorL2I src, memory2 mem)
7381 %{
7382   match(Set mem (StoreC mem src));
7383   predicate(!needs_releasing_store(n));
7384 
7385   ins_cost(INSN_COST);
7386   format %{ "strh  $src, $mem\t# short" %}
7387 
7388   ins_encode(aarch64_enc_strh(src, mem));
7389 
7390   ins_pipe(istore_reg_mem);
7391 %}
7392 
7393 instruct storeimmC0(immI0 zero, memory2 mem)
7394 %{
7395   match(Set mem (StoreC mem zero));
7396   predicate(!needs_releasing_store(n));
7397 
7398   ins_cost(INSN_COST);
7399   format %{ "strh  zr, $mem\t# short" %}
7400 
7401   ins_encode(aarch64_enc_strh0(mem));
7402 
7403   ins_pipe(istore_mem);
7404 %}
7405 
7406 // Store Integer
7407 
7408 instruct storeI(iRegIorL2I src, memory4 mem)
7409 %{
7410   match(Set mem(StoreI mem src));
7411   predicate(!needs_releasing_store(n));
7412 
7413   ins_cost(INSN_COST);
7414   format %{ "strw  $src, $mem\t# int" %}
7415 
7416   ins_encode(aarch64_enc_strw(src, mem));
7417 
7418   ins_pipe(istore_reg_mem);
7419 %}
7420 
7421 instruct storeimmI0(immI0 zero, memory4 mem)
7422 %{
7423   match(Set mem(StoreI mem zero));
7424   predicate(!needs_releasing_store(n));
7425 
7426   ins_cost(INSN_COST);
7427   format %{ "strw  zr, $mem\t# int" %}
7428 
7429   ins_encode(aarch64_enc_strw0(mem));
7430 
7431   ins_pipe(istore_mem);
7432 %}
7433 
7434 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: disassembly annotation previously said "# int" for a 64-bit store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7447 
7448 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: disassembly annotation previously said "# int" for a 64-bit store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7461 
7462 // Store Pointer
7463 instruct storeP(iRegP src, memory8 mem)
7464 %{
7465   match(Set mem (StoreP mem src));
7466   predicate(!needs_releasing_store(n));
7467 
7468   ins_cost(INSN_COST);
7469   format %{ "str  $src, $mem\t# ptr" %}
7470 
7471   ins_encode(aarch64_enc_str(src, mem));
7472 
7473   ins_pipe(istore_reg_mem);
7474 %}
7475 
7476 // Store Pointer
7477 instruct storeimmP0(immP0 zero, memory8 mem)
7478 %{
7479   match(Set mem (StoreP mem zero));
7480   predicate(!needs_releasing_store(n));
7481 
7482   ins_cost(INSN_COST);
7483   format %{ "str zr, $mem\t# ptr" %}
7484 
7485   ins_encode(aarch64_enc_str0(mem));
7486 
7487   ins_pipe(istore_mem);
7488 %}
7489 
7490 // Store Compressed Pointer
7491 instruct storeN(iRegN src, memory4 mem)
7492 %{
7493   match(Set mem (StoreN mem src));
7494   predicate(!needs_releasing_store(n));
7495 
7496   ins_cost(INSN_COST);
7497   format %{ "strw  $src, $mem\t# compressed ptr" %}
7498 
7499   ins_encode(aarch64_enc_strw(src, mem));
7500 
7501   ins_pipe(istore_reg_mem);
7502 %}
7503 
7504 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory4 mem)
7505 %{
7506   match(Set mem (StoreN mem zero));
7507   predicate(CompressedOops::base() == NULL &&
7508             CompressedKlassPointers::base() == NULL &&
7509             (!needs_releasing_store(n)));
7510 
7511   ins_cost(INSN_COST);
7512   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
7513 
7514   ins_encode(aarch64_enc_strw(heapbase, mem));
7515 
7516   ins_pipe(istore_reg_mem);
7517 %}
7518 
7519 // Store Float
7520 instruct storeF(vRegF src, memory4 mem)
7521 %{
7522   match(Set mem (StoreF mem src));
7523   predicate(!needs_releasing_store(n));
7524 
7525   ins_cost(INSN_COST);
7526   format %{ "strs  $src, $mem\t# float" %}
7527 
7528   ins_encode( aarch64_enc_strs(src, mem) );
7529 
7530   ins_pipe(pipe_class_memory);
7531 %}
7532 
7533 // TODO
7534 // implement storeImmF0 and storeFImmPacked
7535 
7536 // Store Double
7537 instruct storeD(vRegD src, memory8 mem)
7538 %{
7539   match(Set mem (StoreD mem src));
7540   predicate(!needs_releasing_store(n));
7541 
7542   ins_cost(INSN_COST);
7543   format %{ "strd  $src, $mem\t# double" %}
7544 
7545   ins_encode( aarch64_enc_strd(src, mem) );
7546 
7547   ins_pipe(pipe_class_memory);
7548 %}
7549 
7550 // Store Compressed Klass Pointer
7551 instruct storeNKlass(iRegN src, memory4 mem)
7552 %{
7553   predicate(!needs_releasing_store(n));
7554   match(Set mem (StoreNKlass mem src));
7555 
7556   ins_cost(INSN_COST);
7557   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7558 
7559   ins_encode(aarch64_enc_strw(src, mem));
7560 
7561   ins_pipe(istore_reg_mem);
7562 %}
7563 
7564 // TODO
7565 // implement storeImmD0 and storeDImmPacked
7566 
7567 // prefetch instructions
7568 // Must be safe to execute with invalid address (cannot fault).
7569 
7570 instruct prefetchalloc( memory8 mem ) %{
7571   match(PrefetchAllocation mem);
7572 
7573   ins_cost(INSN_COST);
7574   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7575 
7576   ins_encode( aarch64_enc_prefetchw(mem) );
7577 
7578   ins_pipe(iload_prefetch);
7579 %}
7580 
7581 //  ---------------- volatile loads and stores ----------------
7582 
7583 // Load Byte (8 bit signed)
7584 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7585 %{
7586   match(Set dst (LoadB mem));
7587 
7588   ins_cost(VOLATILE_REF_COST);
7589   format %{ "ldarsb  $dst, $mem\t# byte" %}
7590 
7591   ins_encode(aarch64_enc_ldarsb(dst, mem));
7592 
7593   ins_pipe(pipe_serial);
7594 %}
7595 
7596 // Load Byte (8 bit signed) into long
7597 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7598 %{
7599   match(Set dst (ConvI2L (LoadB mem)));
7600 
7601   ins_cost(VOLATILE_REF_COST);
7602   format %{ "ldarsb  $dst, $mem\t# byte" %}
7603 
7604   ins_encode(aarch64_enc_ldarsb(dst, mem));
7605 
7606   ins_pipe(pipe_serial);
7607 %}
7608 
7609 // Load Byte (8 bit unsigned)
7610 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7611 %{
7612   match(Set dst (LoadUB mem));
7613 
7614   ins_cost(VOLATILE_REF_COST);
7615   format %{ "ldarb  $dst, $mem\t# byte" %}
7616 
7617   ins_encode(aarch64_enc_ldarb(dst, mem));
7618 
7619   ins_pipe(pipe_serial);
7620 %}
7621 
7622 // Load Byte (8 bit unsigned) into long
7623 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7624 %{
7625   match(Set dst (ConvI2L (LoadUB mem)));
7626 
7627   ins_cost(VOLATILE_REF_COST);
7628   format %{ "ldarb  $dst, $mem\t# byte" %}
7629 
7630   ins_encode(aarch64_enc_ldarb(dst, mem));
7631 
7632   ins_pipe(pipe_serial);
7633 %}
7634 
7635 // Load Short (16 bit signed)
7636 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7637 %{
7638   match(Set dst (LoadS mem));
7639 
7640   ins_cost(VOLATILE_REF_COST);
7641   format %{ "ldarshw  $dst, $mem\t# short" %}
7642 
7643   ins_encode(aarch64_enc_ldarshw(dst, mem));
7644 
7645   ins_pipe(pipe_serial);
7646 %}
7647 
7648 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7649 %{
7650   match(Set dst (LoadUS mem));
7651 
7652   ins_cost(VOLATILE_REF_COST);
7653   format %{ "ldarhw  $dst, $mem\t# short" %}
7654 
7655   ins_encode(aarch64_enc_ldarhw(dst, mem));
7656 
7657   ins_pipe(pipe_serial);
7658 %}
7659 
7660 // Load Short/Char (16 bit unsigned) into long
7661 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7662 %{
7663   match(Set dst (ConvI2L (LoadUS mem)));
7664 
7665   ins_cost(VOLATILE_REF_COST);
7666   format %{ "ldarh  $dst, $mem\t# short" %}
7667 
7668   ins_encode(aarch64_enc_ldarh(dst, mem));
7669 
7670   ins_pipe(pipe_serial);
7671 %}
7672 
// Load Short (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: annotation previously showed "ldarh" (zero-extending) while the
  // encoding emits ldarsh, the sign-extending load-acquire halfword.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7685 
7686 // Load Integer (32 bit signed)
7687 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7688 %{
7689   match(Set dst (LoadI mem));
7690 
7691   ins_cost(VOLATILE_REF_COST);
7692   format %{ "ldarw  $dst, $mem\t# int" %}
7693 
7694   ins_encode(aarch64_enc_ldarw(dst, mem));
7695 
7696   ins_pipe(pipe_serial);
7697 %}
7698 
7699 // Load Integer (32 bit unsigned) into long
7700 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7701 %{
7702   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7703 
7704   ins_cost(VOLATILE_REF_COST);
7705   format %{ "ldarw  $dst, $mem\t# int" %}
7706 
7707   ins_encode(aarch64_enc_ldarw(dst, mem));
7708 
7709   ins_pipe(pipe_serial);
7710 %}
7711 
7712 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: disassembly annotation previously said "# int" for a 64-bit
  // load-acquire.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7724 
7725 // Load Pointer
7726 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7727 %{
7728   match(Set dst (LoadP mem));
7729   predicate(n->as_Load()->barrier_data() == 0);
7730 
7731   ins_cost(VOLATILE_REF_COST);
7732   format %{ "ldar  $dst, $mem\t# ptr" %}
7733 
7734   ins_encode(aarch64_enc_ldar(dst, mem));
7735 
7736   ins_pipe(pipe_serial);
7737 %}
7738 
7739 // Load Compressed Pointer
7740 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7741 %{
7742   match(Set dst (LoadN mem));
7743 
7744   ins_cost(VOLATILE_REF_COST);
7745   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7746 
7747   ins_encode(aarch64_enc_ldarw(dst, mem));
7748 
7749   ins_pipe(pipe_serial);
7750 %}
7751 
7752 // Load Float
7753 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7754 %{
7755   match(Set dst (LoadF mem));
7756 
7757   ins_cost(VOLATILE_REF_COST);
7758   format %{ "ldars  $dst, $mem\t# float" %}
7759 
7760   ins_encode( aarch64_enc_fldars(dst, mem) );
7761 
7762   ins_pipe(pipe_serial);
7763 %}
7764 
7765 // Load Double
7766 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7767 %{
7768   match(Set dst (LoadD mem));
7769 
7770   ins_cost(VOLATILE_REF_COST);
7771   format %{ "ldard  $dst, $mem\t# double" %}
7772 
7773   ins_encode( aarch64_enc_fldard(dst, mem) );
7774 
7775   ins_pipe(pipe_serial);
7776 %}
7777 
7778 // Store Byte
7779 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7780 %{
7781   match(Set mem (StoreB mem src));
7782 
7783   ins_cost(VOLATILE_REF_COST);
7784   format %{ "stlrb  $src, $mem\t# byte" %}
7785 
7786   ins_encode(aarch64_enc_stlrb(src, mem));
7787 
7788   ins_pipe(pipe_class_memory);
7789 %}
7790 
7791 // Store Char/Short
7792 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7793 %{
7794   match(Set mem (StoreC mem src));
7795 
7796   ins_cost(VOLATILE_REF_COST);
7797   format %{ "stlrh  $src, $mem\t# short" %}
7798 
7799   ins_encode(aarch64_enc_stlrh(src, mem));
7800 
7801   ins_pipe(pipe_class_memory);
7802 %}
7803 
7804 // Store Integer
7805 
7806 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7807 %{
7808   match(Set mem(StoreI mem src));
7809 
7810   ins_cost(VOLATILE_REF_COST);
7811   format %{ "stlrw  $src, $mem\t# int" %}
7812 
7813   ins_encode(aarch64_enc_stlrw(src, mem));
7814 
7815   ins_pipe(pipe_class_memory);
7816 %}
7817 
7818 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: disassembly annotation previously said "# int" for a 64-bit
  // store-release.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7830 
7831 // Store Pointer
7832 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7833 %{
7834   match(Set mem (StoreP mem src));
7835 
7836   ins_cost(VOLATILE_REF_COST);
7837   format %{ "stlr  $src, $mem\t# ptr" %}
7838 
7839   ins_encode(aarch64_enc_stlr(src, mem));
7840 
7841   ins_pipe(pipe_class_memory);
7842 %}
7843 
7844 // Store Compressed Pointer
7845 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7846 %{
7847   match(Set mem (StoreN mem src));
7848 
7849   ins_cost(VOLATILE_REF_COST);
7850   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7851 
7852   ins_encode(aarch64_enc_stlrw(src, mem));
7853 
7854   ins_pipe(pipe_class_memory);
7855 %}
7856 
7857 // Store Float
7858 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7859 %{
7860   match(Set mem (StoreF mem src));
7861 
7862   ins_cost(VOLATILE_REF_COST);
7863   format %{ "stlrs  $src, $mem\t# float" %}
7864 
7865   ins_encode( aarch64_enc_fstlrs(src, mem) );
7866 
7867   ins_pipe(pipe_class_memory);
7868 %}
7869 
7870 // TODO
7871 // implement storeImmF0 and storeFImmPacked
7872 
7873 // Store Double
7874 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7875 %{
7876   match(Set mem (StoreD mem src));
7877 
7878   ins_cost(VOLATILE_REF_COST);
7879   format %{ "stlrd  $src, $mem\t# double" %}
7880 
7881   ins_encode( aarch64_enc_fstlrd(src, mem) );
7882 
7883   ins_pipe(pipe_class_memory);
7884 %}
7885 
7886 //  ---------------- end of volatile loads and stores ----------------
7887 
7888 instruct cacheWB(indirect addr)
7889 %{
7890   predicate(VM_Version::supports_data_cache_line_flush());
7891   match(CacheWB addr);
7892 
7893   ins_cost(100);
7894   format %{"cache wb $addr" %}
7895   ins_encode %{
7896     assert($addr->index_position() < 0, "should be");
7897     assert($addr$$disp == 0, "should be");
7898     __ cache_wb(Address($addr$$base$$Register, 0));
7899   %}
7900   ins_pipe(pipe_slow); // XXX
7901 %}
7902 
7903 instruct cacheWBPreSync()
7904 %{
7905   predicate(VM_Version::supports_data_cache_line_flush());
7906   match(CacheWBPreSync);
7907 
7908   ins_cost(100);
7909   format %{"cache wb presync" %}
7910   ins_encode %{
7911     __ cache_wbsync(true);
7912   %}
7913   ins_pipe(pipe_slow); // XXX
7914 %}
7915 
7916 instruct cacheWBPostSync()
7917 %{
7918   predicate(VM_Version::supports_data_cache_line_flush());
7919   match(CacheWBPostSync);
7920 
7921   ins_cost(100);
7922   format %{"cache wb postsync" %}
7923   ins_encode %{
7924     __ cache_wbsync(false);
7925   %}
7926   ins_pipe(pipe_slow); // XXX
7927 %}
7928 
7929 // ============================================================================
7930 // BSWAP Instructions
7931 
// Reverse the byte order of a 32-bit int (single REVW).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a 64-bit long (single REV).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of an unsigned short held in an int register.
// REV16W swaps bytes within each 16-bit halfword; the upper halfword
// is not consumed by callers of ReverseBytesUS.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a signed short: byte-swap each halfword with
// REV16W, then sign-extend from bit 15 with SBFMW so the result is a
// proper (negative-capable) int.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7985 
7986 // ============================================================================
7987 // Zero Count Instructions
7988 
// Count leading zeros of a 32-bit int (single CLZW).
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit long (single CLZ); result is an int.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of an int: AArch64 has no CTZ, so bit-reverse
// with RBITW and then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a long via RBIT + CLZ; result is an int.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8040 
8041 //---------- Population Count Instructions -------------------------------------
8042 //
8043 
// Population count of an int.  The value is moved to a SIMD register,
// counted per-byte with CNT, and the byte counts summed with ADDV.
// NOTE(review): the first movw rewrites $src in place to clear the top
// 32 bits; the 32-bit value itself is unchanged, but $src is written
// without a TEMP/USE_KILL effect -- confirm this is intentional.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of an int loaded from memory: load 4 bytes straight
// into the SIMD register with LDRS, then CNT/ADDV as above.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// Population count of a long, same CNT/ADDV technique as popCountI but
// no zero-extension step is needed for a full 64-bit source.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of a long loaded from memory: LDRD straight into the
// SIMD register, then CNT/ADDV as above.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8130 
8131 // ============================================================================
8132 // MemBar Instruction
8133 
// LoadFence: orders prior loads before subsequent loads and stores
// (dmb ishld semantics via the membar helper).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire that the matcher has proven redundant (the preceding
// load already carries acquire semantics); emits only a comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire fallback: emit a real load/load+load/store barrier.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock is always elided on AArch64: the lock-acquire CAS
// already provides the required ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease proven redundant (the following store already carries
// release semantics); emits only a comment.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease fallback: emit a real load/store+store/store barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: orders prior stores before subsequent stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock is always elided: the lock-release store already
// provides the required ordering.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile proven redundant by the matcher; comment only.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile fallback: full StoreLoad barrier.  Cost is inflated
// (x100) to steer the matcher toward the elided/acquire-release forms.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8281 
8282 // ============================================================================
8283 // Cast/Convert Instructions
8284 
// CastX2P: reinterpret a long as a pointer.  Pure register move,
// skipped entirely when source and destination coincide.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// CastP2X: reinterpret a pointer as a long.  Mirror of castX2P.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// (ConvL2I of CastP2X): MOVW keeps the low 32 bits of the pointer.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8327 
8328 // Convert compressed oop into int for vectors alignment masking
8329 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when compressed oops are unscaled (shift == 0): the
  // narrow-oop bit pattern is then exactly the 32-bit value we need.
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Format fixed: the encoding emits MOVW, and the destination operand
  // was previously printed as a bare "dst" (missing '$').
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8343 
8344 
8345 // Convert oop pointer into compressed form
// EncodeP for a possibly-null oop: compress via the macro-assembler
// helper, which handles the null case; flags are clobbered (KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// EncodeP when the oop is statically known non-null: faster helper,
// no null check needed.
// NOTE(review): cr is declared but there is no effect() clause here,
// unlike encodeHeapOop above -- verify whether KILL cr was intended.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// DecodeN for a possibly-null narrow oop (neither provably NotNull nor
// Constant): full decode with null handling.
// NOTE(review): cr declared without an effect() clause -- see above.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// DecodeN when the narrow oop is known non-null (or constant): no null
// handling required.
// NOTE(review): cr declared without an effect() clause -- see above.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8398 
8399 // n.b. AArch64 implementations of encode_klass_not_null and
8400 // decode_klass_not_null do not modify the flags register so, unlike
8401 // Intel, we don't kill CR as a side effect here
8402 
// EncodePKlass: compress a klass pointer.  Per the note above, the
// AArch64 helper does not touch the flags, so no KILL cr is needed.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// DecodeNKlass: decompress a narrow klass pointer.  Uses the one-arg
// (in-place) helper when dst and src were allocated the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // in-place variant when the allocator chose the same register
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8436 
// CheckCastPP is a type-system-only node: no code is emitted (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP: pointer type-cast marker, emits nothing.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: int range-narrowing marker, emits nothing and costs nothing.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastLL: long range-narrowing marker, emits nothing and costs nothing.
instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8478 
// VectorCastB2X, 4 bytes -> 4 shorts: one signed widen (SXTL).  The
// instruction widens 8 lanes; only the low 4 are meaningful here.
instruct vcvt4Bto4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\t# convert 4B to 4S vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(vdop64);
%}

// VectorCastB2X, 4 bytes -> 4 ints: two successive signed widens.
instruct vcvt4Bto4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastB2X src));
  format %{  "sxtl  $dst, T8H, $src, T8B\n\t"
             "sxtl  $dst, T4S, $dst, T4H\t# convert 4B to 4I vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(vdop128);
%}

// VectorCastB2X, 4 bytes -> 4 floats: widen to int twice, then signed
// int-to-float convert (SCVTF).
instruct vcvt4Bto4F(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastB2X src));
  format %{  "sxtl  $dst, T8H, $src, T8B\n\t"
             "sxtl  $dst, T4S, $dst, T4H\n\t"
             "scvtfv  T4S, $dst, $dst\t# convert 4B to 4F vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorCastB2X, 8 bytes -> 8 shorts: one signed widen to 128 bits.
instruct vcvt8Bto8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\t# convert 8B to 8S vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(vdop128);
%}
8526 
// VectorCastS2X, 4 shorts -> 4 bytes: one narrowing XTN.
instruct vcvt4Sto4B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastS2X src));
  // Format fixed: the destination arrangement is T8B (as encoded below);
  // "T8S" is not a valid AArch64 arrangement specifier.
  format %{ "xtn  $dst, T8B, $src, T8H\t# convert 4S to 4B vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
  %}
  ins_pipe(vdop64);
%}
8536 
// VectorCastS2X, 4 shorts -> 4 ints: one signed widen (SXTL).
instruct vcvt4Sto4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastS2X src));
  format %{ "sxtl  $dst, T4S, $src, T4H\t# convert 4S to 4I vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg), __ T4H);
  %}
  ins_pipe(vdop128);
%}

// VectorCastS2X, 4 shorts -> 4 floats: widen to int, then SCVTF.
instruct vcvt4Sto4F(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastS2X src));
  format %{ "sxtl    $dst, T4S, $src, T4H\n\t"
            "scvtfv  T4S, $dst, $dst\t# convert 4S to 4F vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg), __ T4H);
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorCastS2X, 8 shorts -> 8 bytes: one narrowing XTN.
instruct vcvt8Sto8B(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastS2X src));
  format %{ "xtn  $dst, T8B, $src, T8H\t# convert 8S to 8B vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
  %}
  ins_pipe(vdop128);
%}
8569 
// VectorCastI2X, 2 ints -> 2 longs: one signed widen (SXTL).
instruct vcvt2Ito2L(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorCastI2X src));
  format %{ "sxtl  $dst, T2D, $src, T2S\t# convert 2I to 2L vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
  %}
  ins_pipe(vdop128);
%}

// VectorCastI2X, 2 ints -> 2 floats: one SCVTF on 2S lanes.
instruct vcvt2Ito2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastI2X src));
  format %{ "scvtfv  T2S, $dst, $src\t# convert 2I to 2F vector" %}
  ins_encode %{
    __ scvtfv(__ T2S, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}

// VectorCastI2X, 2 ints -> 2 doubles: widen to long, then SCVTF.
instruct vcvt2Ito2D(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastI2X src));
  format %{ "sxtl    $dst, T2D, $src, T2S\n\t"
            "scvtfv  T2D, $dst, $dst\t# convert 2I to 2D vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorCastI2X, 4 ints -> 4 bytes: two successive narrowing XTNs.
instruct vcvt4Ito4B(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastI2X src));
  format %{ "xtn  $dst, T4H, $src, T4S\n\t"
            "xtn  $dst, T8B, $dst, T8H\t# convert 4I to 4B vector"
  %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
  %}
  ins_pipe(vdop128);
%}

// VectorCastI2X, 4 ints -> 4 shorts: one narrowing XTN.
instruct vcvt4Ito4S(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastI2X src));
  format %{ "xtn  $dst, T4H, $src, T4S\t# convert 4I to 4S vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
  %}
  ins_pipe(vdop128);
%}

// VectorCastI2X, 4 ints -> 4 floats: one SCVTF on 4S lanes.
// NOTE(review): pipe class is vdop64 although both operands are vecX
// (128-bit) -- confirm whether vdop128 was intended.
instruct vcvt4Ito4F(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastI2X src));
  format %{ "scvtfv  T4S, $dst, $src\t# convert 4I to 4F vector" %}
  ins_encode %{
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}
8635 
// VectorCastL2X, 2 longs -> 2 ints: one narrowing XTN.
instruct vcvt2Lto2I(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastL2X src));
  format %{ "xtn  $dst, T2S, $src, T2D\t# convert 2L to 2I vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
  %}
  ins_pipe(vdop128);
%}

// VectorCastL2X, 2 longs -> 2 floats: long-to-double SCVTF, then
// narrow double to float with FCVTN.
instruct vcvt2Lto2F(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastL2X src));
  format %{ "scvtfv  T2D, $dst, $src\n\t"
            "fcvtn   $dst, T2S, $dst, T2D\t# convert 2L to 2F vector"
  %}
  ins_encode %{
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
    __ fcvtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($dst$$reg), __ T2D);
  %}
  ins_pipe(vdop128);
%}

// VectorCastL2X, 2 longs -> 2 doubles: one SCVTF on 2D lanes.
instruct vcvt2Lto2D(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastL2X src));
  format %{ "scvtfv  T2D, $dst, $src\t# convert 2L to 2D vector" %}
  ins_encode %{
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}
8668 
// VectorCastF2X, 2 floats -> 2 doubles: one widening FCVTL.
instruct vcvt2Fto2D(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastF2X src));
  format %{ "fcvtl  $dst, T2D, $src, T2S\t# convert 2F to 2D vector" %}
  ins_encode %{
    __ fcvtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
  %}
  ins_pipe(vdop128);
%}

// VectorCastD2X, 2 doubles -> 2 floats: one narrowing FCVTN.
instruct vcvt2Dto2F(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastD2X src));
  format %{ "fcvtn  $dst, T2S, $src, T2D\t# convert 2D to 2F vector" %}
  ins_encode %{
    __ fcvtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
  %}
  ins_pipe(vdop128);
%}
8688 
8689 // ============================================================================
8690 // Atomic operation instructions
8691 //
8692 // Intel and SPARC both implement Ideal Node LoadPLocked and
8693 // Store{PIL}Conditional instructions using a normal load for the
8694 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8695 //
8696 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8697 // pair to lock object allocations from Eden space when not using
8698 // TLABs.
8699 //
8700 // There does not appear to be a Load{IL}Locked Ideal Node and the
8701 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8702 // and to use StoreIConditional only for 32-bit and StoreLConditional
8703 // only for 64-bit.
8704 //
8705 // We implement LoadPLocked and StorePLocked instructions using,
8706 // respectively the AArch64 hw load-exclusive and store-conditional
8707 // instructions. Whereas we must implement each of
8708 // Store{IL}Conditional using a CAS which employs a pair of
8709 // instructions comprising a load-exclusive followed by a
8710 // store-conditional.
8711 
8712 
8713 // Locked-load (linked load) of the current heap-top
8714 // used when updating the eden heap top
8715 // implemented using ldaxr on AArch64
8716 
// LoadPLocked: load-exclusive with acquire (LDAXR) of the heap-top
// pointer; pairs with storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8729 
8730 // Conditional-store of the updated heap-top.
8731 // Used during allocation of the shared heap.
8732 // Sets flag (EQ) on success.
8733 // implemented using stlxr on AArch64.
8734 
// StorePConditional: store-conditional with release (STLXR) of the
// updated heap top; sets EQ in cr on success.  oldval is implied by
// the preceding loadPLocked, hence not passed to the encoding.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8754 
8755 
8756 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8757 // when attempting to rebias a lock towards the current thread.  We
8758 // must use the acquire form of cmpxchg in order to guarantee acquire
8759 // semantics in this case.
// StoreLConditional: implemented as a 64-bit acquiring CAS; result is
// reflected in the flags (EQ on success).  Acquire form is required
// per the comment above (lock rebiasing).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// StoreIConditional: 32-bit acquiring CAS, mirroring storeLConditional
// (see the note above about why it uses acquire semantics).
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8794 
8795 // standard CompareAndSwapX when we are using barriers
8796 // these have higher priority than the rules selected by a predicate
8797 
8798 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8799 // can't match them
8800 
8801 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8802 
8803   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
8804   ins_cost(2 * VOLATILE_REF_COST);
8805 
8806   effect(KILL cr);
8807 
8808   format %{
8809     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8810     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8811   %}
8812 
8813   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
8814             aarch64_enc_cset_eq(res));
8815 
8816   ins_pipe(pipe_slow);
8817 %}
8818 
8819 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8820 
8821   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
8822   ins_cost(2 * VOLATILE_REF_COST);
8823 
8824   effect(KILL cr);
8825 
8826   format %{
8827     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8828     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8829   %}
8830 
8831   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
8832             aarch64_enc_cset_eq(res));
8833 
8834   ins_pipe(pipe_slow);
8835 %}
8836 
8837 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8838 
8839   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
8840   ins_cost(2 * VOLATILE_REF_COST);
8841 
8842   effect(KILL cr);
8843 
8844  format %{
8845     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8846     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8847  %}
8848 
8849  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
8850             aarch64_enc_cset_eq(res));
8851 
8852   ins_pipe(pipe_slow);
8853 %}
8854 
8855 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8856 
8857   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8858   ins_cost(2 * VOLATILE_REF_COST);
8859 
8860   effect(KILL cr);
8861 
8862  format %{
8863     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8864     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8865  %}
8866 
8867  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8868             aarch64_enc_cset_eq(res));
8869 
8870   ins_pipe(pipe_slow);
8871 %}
8872 
// Strong CAS of a pointer field.  Only matches when no GC barrier data is
// attached to the node (barrier_data() == 0); otherwise a GC-specific rule
// must match instead.  predicate listed before match, and indentation
// normalized, for consistency with the sibling instructs.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8891 
// Strong CAS of a narrow (compressed) oop field.  Kills flags; $res <- 1 on
// success, 0 on failure.  Indentation normalized to match siblings.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8909 
8910 // alternative CompareAndSwapX when we are eliding barriers
8911 
// Strong CAS of a byte, acquiring form: matched (per the predicate) when the
// exclusive load must have acquire semantics, so only VOLATILE_REF_COST.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Strong CAS of a short, acquiring form (see compareAndSwapBAcq).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8949 
// Strong CAS of an int, acquiring form.  Indentation of format/ins_encode
// normalized to two spaces to match the sibling Acq instructs.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8968 
// Strong CAS of a long, acquiring form.  Indentation normalized to match
// the sibling Acq instructs.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8987 
// Strong CAS of a pointer, acquiring form.  Only matches with no GC barrier
// data attached (barrier_data() == 0).  Indentation normalized to match the
// sibling Acq instructs.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9006 
// Strong CAS of a narrow (compressed) oop, acquiring form.  Indentation
// normalized to match the sibling Acq instructs.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9025 
9026 
9027 // ---------------------------------------------------------------------
9028 
9029 
9030 // BEGIN This section of the file is automatically generated. Do not edit --------------
9031 
9032 // Sundry CAS operations.  Note that release is always true,
9033 // regardless of the memory ordering of the CAS.  This is because we
9034 // need the volatile case to be sequentially consistent but there is
9035 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9036 // can't check the type of memory ordering here, so we always emit a
9037 // STLXR.
9038 
9039 // This section is generated from aarch64_ad_cas.m4
9040 
9041 
9042 
// NOTE(review): the format strings below previously said ", weak", but these
// are the STRONG CompareAndExchange forms (/*weak*/ false) — the "weak" tag
// was misleading in PrintOptoAssembly output.  Fixed here; the same fix must
// be mirrored in the generator source, aarch64_ad_cas.m4.

// Strong compare-and-exchange of a byte: returns the old value in $res
// (TEMP_DEF so it cannot alias the inputs), sign-extended to int by sxtbw.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a short; result sign-extended by sxthw.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of an int.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a long.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a narrow (compressed) oop.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a pointer; only with no GC barrier data.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9135 
// NOTE(review): the format strings below previously said ", weak", but these
// are the STRONG acquiring CompareAndExchange forms (/*weak*/ false).  Fixed
// here; mirror the fix in the generator source, aarch64_ad_cas.m4.

// Strong compare-and-exchange of a byte, acquiring form.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a short, acquiring form.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


// Strong compare-and-exchange of an int, acquiring form.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a long, acquiring form.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


// Strong compare-and-exchange of a narrow (compressed) oop, acquiring form.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong compare-and-exchange of a pointer, acquiring form; only with no
// GC barrier data attached.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9235 
// Weak CAS family: the exclusive pair may fail spuriously (/*weak*/ true,
// no retry loop), so $res reports success (1) / failure (0) via csetw on
// the flags left by cmpxchg; the old value itself is not returned (noreg).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a short.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of an int.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a long.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a narrow (compressed) oop.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a pointer; only matches with no GC barrier data attached.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9338 
// Weak CAS, acquiring forms: matched (per the predicate) when the exclusive
// load must acquire; otherwise identical to the non-Acq weak CAS instructs.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a short, acquiring form.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of an int, acquiring form.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a long, acquiring form.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS of a narrow (compressed) oop, acquiring form.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9428 
// Weak CAS of a pointer, acquiring form; only matches with no GC barrier
// data attached.  predicate moved before match for consistency with every
// other *Acq instruct in this generated section — mirror the reordering in
// aarch64_ad_cas.m4.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9446 
9447 // END This section of the file is automatically generated. Do not edit --------------
9448 // ---------------------------------------------------------------------
9449 
// Atomic exchange family: $prev <- old value at [$mem], [$mem] <- $newv.
// Plain forms use atomic_xchg/atomic_xchgw; the *Acq forms (matched via
// needs_acquiring_load_exclusive) use the acquiring atomic_xchgal/xchgalw.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a long.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow (compressed) oop.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a pointer; only matches with no GC barrier data.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of an int (atomic_xchgalw).
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a long (atomic_xchgal).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a narrow oop.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a pointer; only with no GC barrier data.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9534 
9535 
// Atomic fetch-and-add family.  Variants along three axes:
//  - operand width: L (atomic_add/addal) vs I (atomic_addw/addalw);
//  - increment: register vs add/sub immediate ("i" suffix);
//  - result: "_no_res" forms match only when the old value is unused
//    (result_not_used predicate) and pass noreg as the destination;
//  - "Acq" forms use the acquiring atomic_addal* and are matched via
//    needs_acquiring_load_exclusive.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As get_and_addL but the fetched value is discarded (noreg destination).
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add with an immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment long add, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add with a register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int add, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment int add, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variants below (atomic_addal / atomic_addalw).
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9707 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // Set flags from src1 - src2.
    __ cmp($src1$$Register, $src2$$Register);
    // dst = (src1 != src2) ? 1 : 0
    __ csetw($dst$$Register, Assembler::NE);
    // dst = (src1 < src2) ? -dst : dst, yielding -1 / 0 / 1.
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9730 
// As cmpL3_reg_reg, but the second operand is an add/sub-encodable
// long immediate.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // immLAddSub guarantees the constant is encodable as an add/sub
    // immediate, so the int32_t truncation and negation below are safe.
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      // Negative immediate: compare by adding its (positive) negation.
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // dst = (src1 != src2) ? 1 : 0
    __ csetw($dst$$Register, Assembler::NE);
    // dst = (src1 < src2) ? -dst : dst, yielding -1 / 0 / 1.
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9755 
9756 // ============================================================================
9757 // Conditional Move Instructions
9758 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9768 
// Conditional move, int: dst = cond ? src2 : src1 (csel picks its first
// source register when the condition holds).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// dst = cond ? src : 0, using the zero register in place of a loaded 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// dst = cond ? 0 : 1, via csinc(zr, zr): cond picks zr, otherwise zr + 1.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9916 
// Conditional move, long: dst = cond ? src2 : src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = cond ? 0 : src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10014 
// Conditional move, pointer: dst = cond ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// dst = cond ? 0 : src (null on the right of the CMove).
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : 0 (null on the left of the CMove).
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10112 
// Conditional move, compressed pointer (32-bit): dst = cond ? src2 : src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10128 
// Unsigned flavour of cmovN_reg_reg.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // This is the unsigned (cmpOpU) rule; the format previously said "signed".
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10144 
// special cases where one arg is zero

// dst = cond ? 0 : src (narrow null on the right of the CMove).
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// dst = cond ? src : 0 (narrow null on the left of the CMove).
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the rule above.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10210 
// Conditional move, float: dst = cond ? src2 : src1.
// n.b. fcsels receives src2 before src1 (the selected-when-true operand
// comes first); the format string prints operands in match order.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned flavour of the rule above.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10246 
// Conditional move, double: dst = cond ? src2 : src1.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // This rule handles CMoveD/fcseld; the format previously said "float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10264 
// Unsigned flavour of cmovD_reg.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // This rule handles CMoveD/fcseld; the format previously said "float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10282 
10283 // ============================================================================
10284 // Arithmetic Instructions
10285 //
10286 
10287 // Integer Addition
10288 
10289 // TODO
10290 // these currently employ operations which do not set CR and hence are
10291 // not flagged as killing CR but we would like to isolate the cases
10292 // where we want to set flags from those where we don't. need to work
10293 // out how to do that.
10294 
10295 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10296   match(Set dst (AddI src1 src2));
10297 
10298   ins_cost(INSN_COST);
10299   format %{ "addw  $dst, $src1, $src2" %}
10300 
10301   ins_encode %{
10302     __ addw(as_Register($dst$$reg),
10303             as_Register($src1$$reg),
10304             as_Register($src2$$reg));
10305   %}
10306 
10307   ins_pipe(ialu_reg_reg);
10308 %}
10309 
10310 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
10311   match(Set dst (AddI src1 src2));
10312 
10313   ins_cost(INSN_COST);
10314   format %{ "addw $dst, $src1, $src2" %}
10315 
10316   // use opcode to indicate that this is an add not a sub
10317   opcode(0x0);
10318 
10319   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10320 
10321   ins_pipe(ialu_reg_imm);
10322 %}
10323 
10324 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
10325   match(Set dst (AddI (ConvL2I src1) src2));
10326 
10327   ins_cost(INSN_COST);
10328   format %{ "addw $dst, $src1, $src2" %}
10329 
10330   // use opcode to indicate that this is an add not a sub
10331   opcode(0x0);
10332 
10333   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10334 
10335   ins_pipe(ialu_reg_imm);
10336 %}
10337 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus sign-extended int offset, folded into add's sxtw extend.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus shifted long index, folded into an lsl-scaled address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus sign-extended, shifted int index (sxtw-scaled address).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Fuse (ConvI2L src) << scale into a single sbfiz: sign-extend and
// position the source bits in one instruction.  The field width is
// capped at 32 because only the low 32 bits of src are meaningful.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10413 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10430 
// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10462 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10510 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: the format string was missing the space after the mnemonic
  // ("sub$dst"), garbling the disassembly-style output.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10525 
// Integer Negation (special case for sub)

// dst = -src (32-bit), matched from SubI with a zero left operand.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// dst = -src (64-bit), matched from SubL with a zero left operand.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10557 
// Integer Multiply

// dst = src1 * src2 (32-bit).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64-bit signed multiply, matched from MulL of two ConvI2L inputs.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// dst = src1 * src2 (64-bit).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// dst = high 64 bits of the signed 128-bit product src1 * src2.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10622 
// Combined Integer Multiply & Add/Sub

// dst = src3 + src1 * src2 (32-bit).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now names the emitted 32-bit maddw (was "madd"), matching the
  // addw/subw/mulw convention used by the other int-width rules.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10640 
10641 instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10642   match(Set dst (SubI src3 (MulI src1 src2)));
10643 
10644   ins_cost(INSN_COST * 3);
10645   format %{ "msub  $dst, $src1, $src2, $src3" %}
10646 
10647   ins_encode %{
10648     __ msubw(as_Register($dst$$reg),
10649              as_Register($src1$$reg),
10650              as_Register($src2$$reg),
10651              as_Register($src3$$reg));
10652   %}
10653 
10654   ins_pipe(imac_reg_reg);
10655 %}
10656 
10657 // Combined Integer Multiply & Neg
10658 
10659 instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
10660   match(Set dst (MulI (SubI zero src1) src2));
10661   match(Set dst (MulI src1 (SubI zero src2)));
10662 
10663   ins_cost(INSN_COST * 3);
10664   format %{ "mneg  $dst, $src1, $src2" %}
10665 
10666   ins_encode %{
10667     __ mnegw(as_Register($dst$$reg),
10668              as_Register($src1$$reg),
10669              as_Register($src2$$reg));
10670   %}
10671 
10672   ins_pipe(imac_reg_reg);
10673 %}
10674 
// Combined Long Multiply & Add/Sub

// dst = src3 + src1 * src2 in a single MADD.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// dst = src3 - src1 * src2 in a single MSUB.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// dst = -(src1 * src2); both match rules cover the negation on either operand.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10726 
// Combined Integer Signed Multiply & Add/Sub/Neg Long

// dst = src3 + (long)src1 * (long)src2 in a single SMADDL
// (32x32 -> 64-bit signed multiply-accumulate).
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - (long)src1 * (long)src2 in a single SMSUBL.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = -((long)src1 * (long)src2); both rules cover negation on either side.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10776 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes through rscratch1,
// which this pattern therefore clobbers.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10792 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src1 >> 31) >>> 31 is simply src1 >>> 31 (the sign bit as 0 or 1).
// The immI_31 operands guarantee both shift counts are 31, so the encoding
// can hard-code the shift amount.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// dst = src + (src >>> 31): the rounding adjustment (add 1 when negative)
// used for signed division by 2, done as one ADDW with an LSR-shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10828 
// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// (src1 >> 63) >>> 63 is simply src1 >>> 63 (the sign bit as 0 or 1).
// The immI_63 operands guarantee both shift counts are 63, so the encoding
// can hard-code the shift amount.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10850 
// dst = src + (src >>> 63): the rounding adjustment (add 1 when negative)
// used for signed division by 2, done as one ADD with an LSR-shifted operand.
// The immI_63 operands guarantee both shift counts are 63.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format string: show the LSR-shifted operand, consistent with the
  // int variant (div2Round) and with the instruction actually emitted.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10864 
10865 // Integer Remainder
10866 
// Integer remainder: sdivw then msubw (dst = src1 - (src1/src2)*src2).
// The intermediate quotient lives in rscratch1, which is clobbered.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format string: was "msubw($dst, ..." with a stray '(' and no
  // closing paren; debug output now matches the emitted assembly.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10877 
10878 // Long Remainder
10879 
// Long remainder: sdiv then msub (dst = src1 - (src1/src2)*src2).
// The intermediate quotient lives in rscratch1, which is clobbered.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format string: use "\n\t" like modI, and drop the stray '(' from
  // the msub line so the debug output matches the emitted assembly.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10890 
// Integer Shifts

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Only the low 5 bits of the immediate are used (& 0x1f), per Java semantics
// for 32-bit shifts.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Shift count masked to the low 5 bits (& 0x1f).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Shift count masked to the low 5 bits (& 0x1f).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10988 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// Only the low 6 bits of the immediate are used (& 0x3f), per Java semantics
// for 64-bit shifts.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// Shift count masked to the low 6 bits (& 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores: logical right shift of a
// pointer reinterpreted as an integer (CastP2X).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// Shift count masked to the low 6 bits (& 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11105 
11106 // BEGIN This section of the file is automatically generated. Do not edit --------------
11107 
11108 instruct regL_not_reg(iRegLNoSp dst,
11109                          iRegL src1, immL_M1 m1,
11110                          rFlagsReg cr) %{
11111   match(Set dst (XorL src1 m1));
11112   ins_cost(INSN_COST);
11113   format %{ "eon  $dst, $src1, zr" %}
11114 
11115   ins_encode %{
11116     __ eon(as_Register($dst$$reg),
11117               as_Register($src1$$reg),
11118               zr,
11119               Assembler::LSL, 0);
11120   %}
11121 
11122   ins_pipe(ialu_reg);
11123 %}
11124 instruct regI_not_reg(iRegINoSp dst,
11125                          iRegIorL2I src1, immI_M1 m1,
11126                          rFlagsReg cr) %{
11127   match(Set dst (XorI src1 m1));
11128   ins_cost(INSN_COST);
11129   format %{ "eonw  $dst, $src1, zr" %}
11130 
11131   ins_encode %{
11132     __ eonw(as_Register($dst$$reg),
11133               as_Register($src1$$reg),
11134               zr,
11135               Assembler::LSL, 0);
11136   %}
11137 
11138   ins_pipe(ialu_reg);
11139 %}
11140 
11141 instruct AndI_reg_not_reg(iRegINoSp dst,
11142                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11143                          rFlagsReg cr) %{
11144   match(Set dst (AndI src1 (XorI src2 m1)));
11145   ins_cost(INSN_COST);
11146   format %{ "bicw  $dst, $src1, $src2" %}
11147 
11148   ins_encode %{
11149     __ bicw(as_Register($dst$$reg),
11150               as_Register($src1$$reg),
11151               as_Register($src2$$reg),
11152               Assembler::LSL, 0);
11153   %}
11154 
11155   ins_pipe(ialu_reg_reg);
11156 %}
11157 
11158 instruct AndL_reg_not_reg(iRegLNoSp dst,
11159                          iRegL src1, iRegL src2, immL_M1 m1,
11160                          rFlagsReg cr) %{
11161   match(Set dst (AndL src1 (XorL src2 m1)));
11162   ins_cost(INSN_COST);
11163   format %{ "bic  $dst, $src1, $src2" %}
11164 
11165   ins_encode %{
11166     __ bic(as_Register($dst$$reg),
11167               as_Register($src1$$reg),
11168               as_Register($src2$$reg),
11169               Assembler::LSL, 0);
11170   %}
11171 
11172   ins_pipe(ialu_reg_reg);
11173 %}
11174 
11175 instruct OrI_reg_not_reg(iRegINoSp dst,
11176                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11177                          rFlagsReg cr) %{
11178   match(Set dst (OrI src1 (XorI src2 m1)));
11179   ins_cost(INSN_COST);
11180   format %{ "ornw  $dst, $src1, $src2" %}
11181 
11182   ins_encode %{
11183     __ ornw(as_Register($dst$$reg),
11184               as_Register($src1$$reg),
11185               as_Register($src2$$reg),
11186               Assembler::LSL, 0);
11187   %}
11188 
11189   ins_pipe(ialu_reg_reg);
11190 %}
11191 
11192 instruct OrL_reg_not_reg(iRegLNoSp dst,
11193                          iRegL src1, iRegL src2, immL_M1 m1,
11194                          rFlagsReg cr) %{
11195   match(Set dst (OrL src1 (XorL src2 m1)));
11196   ins_cost(INSN_COST);
11197   format %{ "orn  $dst, $src1, $src2" %}
11198 
11199   ins_encode %{
11200     __ orn(as_Register($dst$$reg),
11201               as_Register($src1$$reg),
11202               as_Register($src2$$reg),
11203               Assembler::LSL, 0);
11204   %}
11205 
11206   ins_pipe(ialu_reg_reg);
11207 %}
11208 
11209 instruct XorI_reg_not_reg(iRegINoSp dst,
11210                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
11211                          rFlagsReg cr) %{
11212   match(Set dst (XorI m1 (XorI src2 src1)));
11213   ins_cost(INSN_COST);
11214   format %{ "eonw  $dst, $src1, $src2" %}
11215 
11216   ins_encode %{
11217     __ eonw(as_Register($dst$$reg),
11218               as_Register($src1$$reg),
11219               as_Register($src2$$reg),
11220               Assembler::LSL, 0);
11221   %}
11222 
11223   ins_pipe(ialu_reg_reg);
11224 %}
11225 
11226 instruct XorL_reg_not_reg(iRegLNoSp dst,
11227                          iRegL src1, iRegL src2, immL_M1 m1,
11228                          rFlagsReg cr) %{
11229   match(Set dst (XorL m1 (XorL src2 src1)));
11230   ins_cost(INSN_COST);
11231   format %{ "eon  $dst, $src1, $src2" %}
11232 
11233   ins_encode %{
11234     __ eon(as_Register($dst$$reg),
11235               as_Register($src1$$reg),
11236               as_Register($src2$$reg),
11237               Assembler::LSL, 0);
11238   %}
11239 
11240   ins_pipe(ialu_reg_reg);
11241 %}
11242 
11243 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
11244                          iRegIorL2I src1, iRegIorL2I src2,
11245                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11246   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
11247   ins_cost(1.9 * INSN_COST);
11248   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
11249 
11250   ins_encode %{
11251     __ bicw(as_Register($dst$$reg),
11252               as_Register($src1$$reg),
11253               as_Register($src2$$reg),
11254               Assembler::LSR,
11255               $src3$$constant & 0x1f);
11256   %}
11257 
11258   ins_pipe(ialu_reg_reg_shift);
11259 %}
11260 
11261 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11262                          iRegL src1, iRegL src2,
11263                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11264   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11265   ins_cost(1.9 * INSN_COST);
11266   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11267 
11268   ins_encode %{
11269     __ bic(as_Register($dst$$reg),
11270               as_Register($src1$$reg),
11271               as_Register($src2$$reg),
11272               Assembler::LSR,
11273               $src3$$constant & 0x3f);
11274   %}
11275 
11276   ins_pipe(ialu_reg_reg_shift);
11277 %}
11278 
11279 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11280                          iRegIorL2I src1, iRegIorL2I src2,
11281                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11282   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11283   ins_cost(1.9 * INSN_COST);
11284   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11285 
11286   ins_encode %{
11287     __ bicw(as_Register($dst$$reg),
11288               as_Register($src1$$reg),
11289               as_Register($src2$$reg),
11290               Assembler::ASR,
11291               $src3$$constant & 0x1f);
11292   %}
11293 
11294   ins_pipe(ialu_reg_reg_shift);
11295 %}
11296 
11297 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11298                          iRegL src1, iRegL src2,
11299                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11300   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11301   ins_cost(1.9 * INSN_COST);
11302   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11303 
11304   ins_encode %{
11305     __ bic(as_Register($dst$$reg),
11306               as_Register($src1$$reg),
11307               as_Register($src2$$reg),
11308               Assembler::ASR,
11309               $src3$$constant & 0x3f);
11310   %}
11311 
11312   ins_pipe(ialu_reg_reg_shift);
11313 %}
11314 
11315 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11316                          iRegIorL2I src1, iRegIorL2I src2,
11317                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11318   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11319   ins_cost(1.9 * INSN_COST);
11320   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11321 
11322   ins_encode %{
11323     __ bicw(as_Register($dst$$reg),
11324               as_Register($src1$$reg),
11325               as_Register($src2$$reg),
11326               Assembler::LSL,
11327               $src3$$constant & 0x1f);
11328   %}
11329 
11330   ins_pipe(ialu_reg_reg_shift);
11331 %}
11332 
11333 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11334                          iRegL src1, iRegL src2,
11335                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11336   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11337   ins_cost(1.9 * INSN_COST);
11338   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11339 
11340   ins_encode %{
11341     __ bic(as_Register($dst$$reg),
11342               as_Register($src1$$reg),
11343               as_Register($src2$$reg),
11344               Assembler::LSL,
11345               $src3$$constant & 0x3f);
11346   %}
11347 
11348   ins_pipe(ialu_reg_reg_shift);
11349 %}
11350 
11351 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
11352                          iRegIorL2I src1, iRegIorL2I src2,
11353                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11354   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
11355   ins_cost(1.9 * INSN_COST);
11356   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
11357 
11358   ins_encode %{
11359     __ eonw(as_Register($dst$$reg),
11360               as_Register($src1$$reg),
11361               as_Register($src2$$reg),
11362               Assembler::LSR,
11363               $src3$$constant & 0x1f);
11364   %}
11365 
11366   ins_pipe(ialu_reg_reg_shift);
11367 %}
11368 
11369 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
11370                          iRegL src1, iRegL src2,
11371                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11372   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
11373   ins_cost(1.9 * INSN_COST);
11374   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
11375 
11376   ins_encode %{
11377     __ eon(as_Register($dst$$reg),
11378               as_Register($src1$$reg),
11379               as_Register($src2$$reg),
11380               Assembler::LSR,
11381               $src3$$constant & 0x3f);
11382   %}
11383 
11384   ins_pipe(ialu_reg_reg_shift);
11385 %}
11386 
11387 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
11388                          iRegIorL2I src1, iRegIorL2I src2,
11389                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11390   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
11391   ins_cost(1.9 * INSN_COST);
11392   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
11393 
11394   ins_encode %{
11395     __ eonw(as_Register($dst$$reg),
11396               as_Register($src1$$reg),
11397               as_Register($src2$$reg),
11398               Assembler::ASR,
11399               $src3$$constant & 0x1f);
11400   %}
11401 
11402   ins_pipe(ialu_reg_reg_shift);
11403 %}
11404 
11405 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
11406                          iRegL src1, iRegL src2,
11407                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11408   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
11409   ins_cost(1.9 * INSN_COST);
11410   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
11411 
11412   ins_encode %{
11413     __ eon(as_Register($dst$$reg),
11414               as_Register($src1$$reg),
11415               as_Register($src2$$reg),
11416               Assembler::ASR,
11417               $src3$$constant & 0x3f);
11418   %}
11419 
11420   ins_pipe(ialu_reg_reg_shift);
11421 %}
11422 
11423 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
11424                          iRegIorL2I src1, iRegIorL2I src2,
11425                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11426   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
11427   ins_cost(1.9 * INSN_COST);
11428   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
11429 
11430   ins_encode %{
11431     __ eonw(as_Register($dst$$reg),
11432               as_Register($src1$$reg),
11433               as_Register($src2$$reg),
11434               Assembler::LSL,
11435               $src3$$constant & 0x1f);
11436   %}
11437 
11438   ins_pipe(ialu_reg_reg_shift);
11439 %}
11440 
11441 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
11442                          iRegL src1, iRegL src2,
11443                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11444   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
11445   ins_cost(1.9 * INSN_COST);
11446   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
11447 
11448   ins_encode %{
11449     __ eon(as_Register($dst$$reg),
11450               as_Register($src1$$reg),
11451               as_Register($src2$$reg),
11452               Assembler::LSL,
11453               $src3$$constant & 0x3f);
11454   %}
11455 
11456   ins_pipe(ialu_reg_reg_shift);
11457 %}
11458 
11459 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
11460                          iRegIorL2I src1, iRegIorL2I src2,
11461                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11462   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
11463   ins_cost(1.9 * INSN_COST);
11464   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
11465 
11466   ins_encode %{
11467     __ ornw(as_Register($dst$$reg),
11468               as_Register($src1$$reg),
11469               as_Register($src2$$reg),
11470               Assembler::LSR,
11471               $src3$$constant & 0x1f);
11472   %}
11473 
11474   ins_pipe(ialu_reg_reg_shift);
11475 %}
11476 
11477 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
11478                          iRegL src1, iRegL src2,
11479                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11480   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
11481   ins_cost(1.9 * INSN_COST);
11482   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
11483 
11484   ins_encode %{
11485     __ orn(as_Register($dst$$reg),
11486               as_Register($src1$$reg),
11487               as_Register($src2$$reg),
11488               Assembler::LSR,
11489               $src3$$constant & 0x3f);
11490   %}
11491 
11492   ins_pipe(ialu_reg_reg_shift);
11493 %}
11494 
11495 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
11496                          iRegIorL2I src1, iRegIorL2I src2,
11497                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11498   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
11499   ins_cost(1.9 * INSN_COST);
11500   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
11501 
11502   ins_encode %{
11503     __ ornw(as_Register($dst$$reg),
11504               as_Register($src1$$reg),
11505               as_Register($src2$$reg),
11506               Assembler::ASR,
11507               $src3$$constant & 0x1f);
11508   %}
11509 
11510   ins_pipe(ialu_reg_reg_shift);
11511 %}
11512 
11513 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
11514                          iRegL src1, iRegL src2,
11515                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11516   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
11517   ins_cost(1.9 * INSN_COST);
11518   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
11519 
11520   ins_encode %{
11521     __ orn(as_Register($dst$$reg),
11522               as_Register($src1$$reg),
11523               as_Register($src2$$reg),
11524               Assembler::ASR,
11525               $src3$$constant & 0x3f);
11526   %}
11527 
11528   ins_pipe(ialu_reg_reg_shift);
11529 %}
11530 
11531 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
11532                          iRegIorL2I src1, iRegIorL2I src2,
11533                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11534   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
11535   ins_cost(1.9 * INSN_COST);
11536   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
11537 
11538   ins_encode %{
11539     __ ornw(as_Register($dst$$reg),
11540               as_Register($src1$$reg),
11541               as_Register($src2$$reg),
11542               Assembler::LSL,
11543               $src3$$constant & 0x1f);
11544   %}
11545 
11546   ins_pipe(ialu_reg_reg_shift);
11547 %}
11548 
11549 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
11550                          iRegL src1, iRegL src2,
11551                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11552   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
11553   ins_cost(1.9 * INSN_COST);
11554   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
11555 
11556   ins_encode %{
11557     __ orn(as_Register($dst$$reg),
11558               as_Register($src1$$reg),
11559               as_Register($src2$$reg),
11560               Assembler::LSL,
11561               $src3$$constant & 0x3f);
11562   %}
11563 
11564   ins_pipe(ialu_reg_reg_shift);
11565 %}
11566 
// ---- AND with a shifted second operand ----
// Each instruct below folds (And src1 (shift src2 src3)) into one AND
// using AArch64's shifted-register operand form.  32-bit forms emit
// "andw" and mask the shift amount with 0x1f; 64-bit forms emit "andr"
// (HotSpot's name for AND, since "and" collides with the C++ keyword)
// and mask with 0x3f -- both matching Java shift semantics.
// NOTE(review): rFlagsReg cr appears in each operand list but is never
// matched or listed as an effect -- presumably historical; confirm.

// int: src1 & (src2 >>> src3)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 & (src2 >>> src3)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 & (src2 >> src3)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 & (src2 >> src3)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 & (src2 << src3)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 & (src2 << src3)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11680 
// ---- XOR with a shifted second operand ----
// Fold (Xor src1 (shift src2 src3)) into a single EOR with a
// shifted-register operand.  32-bit forms use "eorw" and mask the shift
// with 0x1f; 64-bit forms use "eor" and mask with 0x3f.
// NOTE(review): rFlagsReg cr is unused in match/effect -- confirm.

// int: src1 ^ (src2 >>> src3)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 ^ (src2 >>> src3)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 ^ (src2 >> src3)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 ^ (src2 >> src3)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 ^ (src2 << src3)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 ^ (src2 << src3)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11794 
// ---- OR with a shifted second operand ----
// Fold (Or src1 (shift src2 src3)) into a single ORR with a
// shifted-register operand.  32-bit forms use "orrw"/0x1f mask, 64-bit
// forms use "orr"/0x3f mask.
// NOTE(review): rFlagsReg cr is unused in match/effect -- confirm.

// int: src1 | (src2 >>> src3)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | (src2 >>> src3)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 | (src2 >> src3)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | (src2 >> src3)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 | (src2 << src3)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 | (src2 << src3)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11908 
// ---- ADD with a shifted second operand ----
// Fold (Add src1 (shift src2 src3)) into a single ADD with a
// shifted-register operand.  32-bit forms use "addw"/0x1f mask, 64-bit
// forms use "add"/0x3f mask.
// NOTE(review): rFlagsReg cr is unused in match/effect -- confirm.

// int: src1 + (src2 >>> src3)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 + (src2 >> src3)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 + (src2 >> src3)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12022 
// ---- SUB with a shifted second operand ----
// Fold (Sub src1 (shift src2 src3)) into a single SUB with a
// shifted-register operand.  32-bit forms use "subw"/0x1f mask, 64-bit
// forms use "sub"/0x3f mask.  Note only the second (subtracted) operand
// may carry the shift -- matching the ISA's operand form.
// NOTE(review): rFlagsReg cr is unused in match/effect -- confirm.

// int: src1 - (src2 >>> src3)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 - (src2 >> src3)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 - (src2 >> src3)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12136 
12137 
12138 
// ---- Shift-left-then-shift-right idioms ----
// "(src << l) >> r" collapses to a single signed/unsigned bitfield move.
// SBFM/UBFM take immr (rotate amount, computed here as (rshift - lshift)
// masked to the width) and imms (top bit of the source field, width-1 -
// lshift); this is the standard BFM encoding of the double-shift idiom.

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts masked to 0..63 to match Java long shift semantics.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;              // imms: highest bit of the field
    int r = (rshift - lshift) & 63;   // immr: right-rotate amount
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // 32-bit variant: counts masked to 0..31.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;              // imms
    int r = (rshift - lshift) & 31;   // immr
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned variant: (src << l) >>> r becomes UBFM (zero-extending).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;              // imms
    int r = (rshift - lshift) & 63;   // immr
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;              // imms
    int r = (rshift - lshift) & 31;   // immr
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask  ==>  UBFXW dst, src, rshift, width
// where width = log2(mask + 1); immI_bitmask guarantees mask is of the
// form 2^k - 1, so the And is an unsigned bitfield extract.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // (field width + start bit must fit inside the 32-bit register)
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask is 2^width - 1
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant: (src >>> rshift) & mask  ==>  UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12254 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit UBFX zero-extends the extracted field, which coincides
// with ConvI2L of the non-negative masked value.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask is 2^width - 1
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12274 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift  ==>  UBFIZW dst, src, lshift, width  where
// width = log2(mask + 1).  The predicate keeps lshift + width within
// the 32-bit register so the inserted field does not overflow.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);   // mask is 2^width - 1
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of the pattern above.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (the masked value is non-negative, so the I2L conversion is a zero
// extension that UBFIZ performs implicitly).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12329 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64
// (enforced by the predicate, mod 64) is an EXTR: extract a 64-bit
// field starting at bit rshift of the src2:src1 register pair.  When
// src1 == src2 this is a rotate right by rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12346 
// 32-bit EXTR: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 (enforced mod 32 by the predicate).  When
// src1 == src2 this is a rotate right by rshift.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Print the 32-bit mnemonic actually emitted below ("extrw", not
  // "extr"), consistent with the other w-suffixed formats in this file.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12361 
// Same EXTR pattern with Add instead of Or: because the shifted halves
// occupy disjoint bit ranges (lshift + rshift == 64 mod 64, per the
// predicate), the addition cannot carry and equals the bitwise OR.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12376 
// 32-bit EXTR pattern with Add instead of Or: the shifted halves occupy
// disjoint bit ranges (lshift + rshift == 32 mod 32, per the predicate),
// so the addition cannot carry and equals the bitwise OR.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Print the 32-bit mnemonic actually emitted below ("extrw", not
  // "extr"), consistent with the other w-suffixed formats in this file.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12391 
12392 
// rol expander
// AArch64 has no rotate-left-by-register instruction; rotate left by s
// equals rotate right by (-s mod width), so negate the count into
// rscratch1 (subw from zr) and use RORV.  Costs 2 instructions.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of the negate-then-RORV sequence above.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12424 
// Match the rotate-left idiom (x << s) | (x >>> (64 - s)) and expand to
// the rolL_rReg negate+RORV sequence.  The C0 variant matches the
// equivalent (x << s) | (x >>> (0 - s)) spelling, since shift counts
// are taken mod 64.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom: (x << s) | (x >>> (32 - s)) -- and the
// 0 - s spelling -- expanded to rolI_rReg.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12460 
// ror expander
// Rotate right by register maps directly onto RORV -- a single
// instruction, hence the lower cost than the rol expanders.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant using RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12490 
// Match the rotate-right idiom (x >>> s) | (x << (64 - s)) and expand
// to the single-instruction rorL_rReg.  The C0 variant matches the
// (x >>> s) | (x << (0 - s)) spelling (shift counts taken mod 64).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom: (x >>> s) | (x << (32 - s)) -- and the
// 0 - s spelling -- expanded to rorI_rReg.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12526 
// Add/subtract (extended)
//
// These instructs fold an int-to-long conversion into the extended-register
// operand form of ADD/SUB (sxtw), avoiding a separate extend instruction.

// Long add of a sign-extended int: add $dst, $src1, $src2, sxtw.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long subtract of a sign-extended int: sub $dst, $src1, $src2, sxtw.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12554 
12555 
// Int add of a 16-bit sign-extension: (x << 16) >> 16 is sxth, folded
// into the add's extended-register operand.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Int add of an 8-bit sign-extension: (x << 24) >> 24 is sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Int add of an 8-bit zero-extension: (x << 24) >>> 24 is uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of a 16-bit sign-extension: (x << 48) >> 48 is sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of a 32-bit sign-extension: (x << 32) >> 32 is sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of an 8-bit sign-extension: (x << 56) >> 56 is sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of an 8-bit zero-extension: (x << 56) >>> 56 is uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12646 
12647 
// Add/subtract where the zero-extension is written as an AND mask:
// x & 0xFF is uxtb, x & 0xFFFF is uxth, x & 0xFFFFFFFF is uxtw.
// All fold into the extended-register operand of ADD/SUB.

// Int add of (x & 0xFF): addw with uxtb operand.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Int add of (x & 0xFFFF): addw with uxth operand.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of (x & 0xFFL): add with uxtb operand.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of (x & 0xFFFFL): add with uxth operand.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add of (x & 0xFFFFFFFFL): add with uxtw operand.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Int subtract of (x & 0xFF): subw with uxtb operand.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Int subtract of (x & 0xFFFF): subw with uxth operand.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long subtract of (x & 0xFFL): sub with uxtb operand.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long subtract of (x & 0xFFFFL): sub with uxth operand.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long subtract of (x & 0xFFFFFFFFL): sub with uxtw operand.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12777 
12778 
// Add/subtract of a sign-extended value that is then shifted left:
// ((x << k) >> k) << lshift2 folds into the extended-register operand
// "ext #lshift2".  lshift2 is constrained by immIExt to the shift amounts
// the extended-register encoding supports (immIExt is defined earlier in
// this file).

// Long add of sxtb(x) << lshift2.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long add of sxth(x) << lshift2.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long add of sxtw(x) << lshift2.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of sxtb(x) << lshift2.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of sxth(x) << lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of sxtw(x) << lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int add of sxtb(x) << lshift2.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int add of sxth(x) << lshift2.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int subtract of sxtb(x) << lshift2.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int subtract of sxth(x) << lshift2.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12908 
12909 
// Long add of a sign-extended int shifted left: both the ConvI2L and the
// left shift fold into the add's extended-register operand (sxtw #lshift).
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of a sign-extended int shifted left (sxtw #lshift).
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12935 
12936 
// Add/subtract of a masked (zero-extended) value shifted left:
// (x & mask) << lshift folds into the extended-register operand
// "uxt* #lshift".

// Long add of uxtb(x) << lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long add of uxth(x) << lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long add of uxtw(x) << lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of uxtb(x) << lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of uxth(x) << lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Long subtract of uxtw(x) << lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int add of uxtb(x) << lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int add of uxth(x) << lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int subtract of uxtb(x) << lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Int subtract of uxth(x) << lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13066 
// Logical reductions over 8 bytes held in a 64-bit vector (vecD).
// Strategy: extract the two 32-bit halves of the vector into GPRs, combine
// them, then fold 32 -> 16 -> 8 bits with shifted logical ops, combine with
// the scalar input src1, and sign-extend the surviving byte to an int.

// AND-reduce 8 bytes.
instruct reduce_and8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t and reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR-reduce 8 bytes (same halving scheme as reduce_and8B, with orrw).
instruct reduce_orr8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t orr reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR-reduce 8 bytes (same halving scheme, with eorw).
instruct reduce_eor8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t eor reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13144 
// Logical reductions over 16 bytes held in a 128-bit vector (vecX).
// Same scheme as the 8B variants but starting from the two 64-bit halves:
// fold 64 -> 32 bits with a 64-bit shifted op, then 32 -> 16 -> 8 with
// 32-bit ops, combine with src1 and sign-extend the final byte.

// AND-reduce 16 bytes.
instruct reduce_and16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t and reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR-reduce 16 bytes.
instruct reduce_orr16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t orr reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR-reduce 16 bytes.
instruct reduce_eor16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t eor reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13228 
13229 instruct reduce_and4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
13230 %{
13231   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
13232   match(Set dst (AndReductionV src1 src2));
13233   ins_cost(INSN_COST);
13234   effect(TEMP_DEF dst, TEMP tmp);
13235   format %{ "umov   $tmp, $src2, S, 0\n\t"
13236             "umov   $dst, $src2, S, 1\n\t"
13237             "andw   $dst, $dst, $tmp\n\t"
13238             "andw   $dst, $dst, $dst, LSR #16\n\t"
13239             "andw   $dst, $src1, $dst\n\t"
13240             "sxth   $dst, $dst\t and reduction4S"
13241   %}
13242   ins_encode %{
13243     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
13244     __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
13245     __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
13246     __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
13247     __ andw($dst$$Register, $src1$$Register, $dst$$Register);
13248     __ sxth($dst$$Register, $dst$$Register);
13249   %}
13250   ins_pipe(pipe_class_default);
13251 %}
13252 
// OR reduction of a 4-short vector (vecD src2), folded with scalar src1.
// Both 32-bit halves of the D register are moved to GP registers and ORed;
// the two 16-bit lanes in the resulting word are folded with a shifted OR,
// and the final short result is sign-extended into dst.
instruct reduce_orr4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t orr reduction4S"
  %}
  ins_encode %{
    // Move lanes {0,1} and {2,3} out as two 32-bit words.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    // Fold the upper 16 bits onto the lower 16 bits.
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13276 
// XOR reduction of a 4-short vector (vecD src2), folded with scalar src1.
// Both 32-bit halves of the D register are moved to GP registers and XORed;
// the two 16-bit lanes in the resulting word are folded with a shifted XOR,
// and the final short result is sign-extended into dst.
instruct reduce_eor4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t eor reduction4S"
  %}
  ins_encode %{
    // Move lanes {0,1} and {2,3} out as two 32-bit words.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    // Fold the upper 16 bits onto the lower 16 bits.
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13300 
// AND reduction of an 8-short vector (vecX src2), folded with scalar src1.
// The two 64-bit halves of the Q register are ANDed in a GP register, then
// folded down to 32 and finally 16 bits with shifted ANDs; the final short
// result is sign-extended into dst.
instruct reduce_and8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t and reduction8S"
  %}
  ins_encode %{
    // Move lanes {0..3} and {4..7} out as two 64-bit doublewords.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    // 64 -> 32 -> 16 bit fold via shifted ANDs.
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13326 
// OR reduction of an 8-short vector (vecX src2), folded with scalar src1.
// The two 64-bit halves of the Q register are ORed in a GP register, then
// folded down to 32 and finally 16 bits with shifted ORs; the final short
// result is sign-extended into dst.
instruct reduce_orr8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t orr reduction8S"
  %}
  ins_encode %{
    // Move lanes {0..3} and {4..7} out as two 64-bit doublewords.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    // 64 -> 32 -> 16 bit fold via shifted ORs.
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13352 
// XOR reduction of an 8-short vector (vecX src2), folded with scalar src1.
// The two 64-bit halves of the Q register are XORed in a GP register, then
// folded down to 32 and finally 16 bits with shifted XORs; the final short
// result is sign-extended into dst.
instruct reduce_eor8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t eor reduction8S"
  %}
  ins_encode %{
    // Move lanes {0..3} and {4..7} out as two 64-bit doublewords.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    // 64 -> 32 -> 16 bit fold via shifted XORs.
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13378 
// AND reduction of a 2-int vector (vecD src2), folded with scalar src1.
// Each 32-bit lane is moved to a GP register and ANDed in turn.
instruct reduce_and2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "andw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "andw  $dst, $tmp, $dst\t and reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ andw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13398 
// OR reduction of a 2-int vector (vecD src2), folded with scalar src1.
// Each 32-bit lane is moved to a GP register and ORed in turn.
instruct reduce_orr2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "orrw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "orrw  $dst, $tmp, $dst\t orr reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ orrw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13418 
// XOR reduction of a 2-int vector (vecD src2), folded with scalar src1.
// Each 32-bit lane is moved to a GP register and XORed in turn.
instruct reduce_eor2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "eorw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "eorw  $dst, $tmp, $dst\t eor reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ eorw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13438 
// AND reduction of a 4-int vector (vecX src2), folded with scalar src1.
// The two 64-bit halves of the Q register are ANDed in a GP register, then
// the upper word is folded onto the lower with a shifted AND.
instruct reduce_and4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $src1, $dst\t and reduction4I"
  %}
  ins_encode %{
    // Move lanes {0,1} and {2,3} out as two 64-bit doublewords.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    // Fold the upper 32 bits onto the lower 32 bits.
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13460 
// OR reduction of a 4-int vector (vecX src2), folded with scalar src1.
// The two 64-bit halves of the Q register are ORed in a GP register, then
// the upper word is folded onto the lower with a shifted OR.
instruct reduce_orr4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $src1, $dst\t orr reduction4I"
  %}
  ins_encode %{
    // Move lanes {0,1} and {2,3} out as two 64-bit doublewords.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    // Fold the upper 32 bits onto the lower 32 bits.
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13482 
// XOR reduction of a 4-int vector (vecX src2), folded with scalar src1.
// The two 64-bit halves of the Q register are XORed in a GP register, then
// the upper word is folded onto the lower with a shifted XOR.
instruct reduce_eor4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $src1, $dst\t eor reduction4I"
  %}
  ins_encode %{
    // Move lanes {0,1} and {2,3} out as two 64-bit doublewords.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    // Fold the upper 32 bits onto the lower 32 bits.
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13504 
// AND reduction of a 2-long vector (vecX src2), folded with scalar src1.
// Each 64-bit lane is moved to a GP register and ANDed in turn.
instruct reduce_and2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "andr  $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "andr  $dst, $dst, $tmp\t and reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ andr($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13524 
// OR reduction of a 2-long vector (vecX src2), folded with scalar src1.
// Each 64-bit lane is moved to a GP register and ORed in turn.
instruct reduce_orr2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "orr   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "orr   $dst, $dst, $tmp\t orr reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ orr ($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13544 
// XOR reduction of a 2-long vector (vecX src2), folded with scalar src1.
// Each 64-bit lane is moved to a GP register and XORed in turn.
instruct reduce_eor2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "eor   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "eor   $dst, $dst, $tmp\t eor reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ eor ($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13564 
13565 // ------------------------------ Vector insert ---------------------------------
13566 
// Insert GP register $val into byte lane $idx of an 8-byte (D) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert8B(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T8B, $idx, $val\t# insert into vector(8B)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13583 
// Insert GP register $val into byte lane $idx of a 16-byte (Q) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert16B(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T16B, $idx, $val\t# insert into vector(16B)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13600 
// Insert GP register $val into 16-bit lane $idx of a 4-short (D) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert4S(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T4H, $idx, $val\t# insert into vector(4S)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13617 
// Insert GP register $val into 16-bit lane $idx of an 8-short (Q) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert8S(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T8H, $idx, $val\t# insert into vector(8S)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13634 
// Insert GP register $val into 32-bit lane $idx of a 2-int (D) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert2I(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T2S, $idx, $val\t# insert into vector(2I)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13651 
// Insert GP register $val into 32-bit lane $idx of a 4-int (Q) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert4I(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T4S, $idx, $val\t# insert into vector(4I)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13668 
// Insert GP register $val into 64-bit lane $idx of a 2-long (Q) vector.
// The orr is a vector register move, emitted only when dst != src.
instruct insert2L(vecX dst, vecX src, iRegL val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T2D, $idx, $val\t# insert into vector(2L)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T2D, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13685 
// Insert FP register $val (lane 0) into 32-bit lane $idx of a 2-float (D)
// vector. dst is copied from src unconditionally; TEMP_DEF dst keeps the
// allocator from assigning dst to an input other than src.
instruct insert2F(vecD dst, vecD src, vRegF val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "ins    $dst, S, $val, $idx, 0\t# insert into vector(2F)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_class_default);
%}
13702 
// Insert FP register $val (lane 0) into 32-bit lane $idx of a 4-float (Q)
// vector. dst is copied from src unconditionally; TEMP_DEF dst keeps the
// allocator from assigning dst to an input other than src.
instruct insert4F(vecX dst, vecX src, vRegF val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "ins    $dst, S, $val, $idx, 0\t# insert into vector(4F)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_class_default);
%}
13719 
// Insert FP register $val (lane 0) into 64-bit lane $idx of a 2-double (Q)
// vector. dst is copied from src unconditionally; TEMP_DEF dst keeps the
// allocator from assigning dst to an input other than src.
instruct insert2D(vecX dst, vecX src, vRegD val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "ins    $dst, D, $val, $idx, 0\t# insert into vector(2D)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ D,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_class_default);
%}
13736 
13737 // ------------------------------ Vector extract ---------------------------------
13738 
// Extract byte lane $idx of an 8-byte (D) vector, sign-extended (smov) to int.
instruct extract8B(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 8);
  match(Set dst (ExtractB src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, B, $idx\t# extract from vector(8B)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ B, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13750 
// Extract byte lane $idx of a 16-byte (Q) vector, sign-extended (smov) to int.
instruct extract16B(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 16);
  match(Set dst (ExtractB src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, B, $idx\t# extract from vector(16B)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ B, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13762 
// Extract 16-bit lane $idx of a 4-short (D) vector, sign-extended (smov) to int.
instruct extract4S(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractS src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, H, $idx\t# extract from vector(4S)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ H, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13774 
// Extract 16-bit lane $idx of an 8-short (Q) vector, sign-extended (smov) to int.
instruct extract8S(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 8);
  match(Set dst (ExtractS src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, H, $idx\t# extract from vector(8S)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ H, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13786 
// Extract 32-bit lane $idx of a 2-int (D) vector into a GP register (umov;
// no sign extension needed for a full word).
instruct extract2I(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractI src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, S, $idx\t# extract from vector(2I)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ S, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13798 
// Extract 32-bit lane $idx of a 4-int (Q) vector into a GP register (umov;
// no sign extension needed for a full word).
instruct extract4I(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractI src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, S, $idx\t# extract from vector(4I)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ S, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13810 
// Extract 64-bit lane $idx of a 2-long (Q) vector into a GP register (umov).
instruct extract2L(iRegLNoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractL src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, D, $idx\t# extract from vector(2L)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ D, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13822 
// Extract 32-bit lane $idx of a 2-float (D) vector: copy it into lane 0 of
// the FP destination register (the scalar float position).
instruct extract2F(vRegF dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractF src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, S, $src, 0, $idx\t# extract from vector(2F)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13835 
// Extract 32-bit lane $idx of a 4-float (Q) vector: copy it into lane 0 of
// the FP destination register (the scalar float position).
instruct extract4F(vRegF dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractF src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, S, $src, 0, $idx\t# extract from vector(4F)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13848 
// Extract 64-bit lane $idx of a 2-double (Q) vector: copy it into lane 0 of
// the FP destination register (the scalar double position).
instruct extract2D(vRegD dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractD src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, D, $src, 0, $idx\t# extract from vector(2D)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ D,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
13861 // END This section of the file is automatically generated. Do not edit --------------
13862 
13863 // ============================================================================
13864 // Floating Point Arithmetic Instructions
13865 
// Single-precision floating-point add: dst = src1 + src2 (fadds).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13880 
// Double-precision floating-point add: dst = src1 + src2 (faddd).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13895 
// Single-precision floating-point subtract: dst = src1 - src2 (fsubs).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13910 
// Double-precision floating-point subtract: dst = src1 - src2 (fsubd).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13925 
// Single-precision floating-point multiply: dst = src1 * src2 (fmuls).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13940 
// Double-precision floating-point multiply: dst = src1 * src2 (fmuld).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13955 
// src1 * src2 + src3
// Fused single-precision multiply-add (fmadds); only matched when UseFMA
// is enabled, since fusing changes rounding versus separate mul + add.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13972 
// src1 * src2 + src3
// Fused double-precision multiply-add (fmaddd); only matched when UseFMA
// is enabled, since fusing changes rounding versus separate mul + add.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13989 
// -src1 * src2 + src3
// Fused single-precision multiply-subtract (fmsubs). Two match rules cover
// the negation appearing on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14007 
// -src1 * src2 + src3
// Fused double-precision multiply-subtract (fmsubd). Two match rules cover
// the negation appearing on either multiplicand.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14025 
// -src1 * src2 - src3
// Fused single-precision negated multiply-add (fnmadds). Two match rules
// cover the negation appearing on either multiplicand.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14043 
14044 // -src1 * src2 - src3
14045 instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
14046   predicate(UseFMA);
14047   match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
14048   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
14049 
14050   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
14051 
14052   ins_encode %{
14053     __ fnmaddd(as_FloatRegister($dst$$reg),
14054                as_FloatRegister($src1$$reg),
14055                as_FloatRegister($src2$$reg),
14056                as_FloatRegister($src3$$reg));
14057   %}
14058 
14059   ins_pipe(pipe_class_default);
14060 %}
14061 
// src1 * src2 - src3 (single precision): negated addend only.
// The former immF0 operand was unused by both the match rule and the
// encoding, so it has been removed.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14078 
// src1 * src2 - src3 (double precision): negated addend only.
// The former immD0 operand was unused by both the match rule and the
// encoding, so it has been removed.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    // n.b. the macro assembler's entry point for FNMSUB (double) is
    // spelled fnmsub, without the trailing 'd'.
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
14096 
14097 
// Math.max(FF)F — single-precision maximum via fmaxs.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F — single-precision minimum via fmins.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D — double-precision maximum via fmaxd.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D — double-precision minimum via fmind.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
14153 
14154 
// Single-precision divide. Division is slow, hence the inflated cost
// so the matcher prefers cheaper alternatives when available.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide; costed higher than the single-precision form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14184 
// Single-precision negate.
// Format fixed from "fneg" to "fnegs": the encoding emits fnegs, and the
// double variant below spells out "fnegd".
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14198 
// Double-precision negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14212 
// Single-precision absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14238 
// Double-precision square root.
// Pipe class corrected from fp_div_s to fp_div_d: this is the double
// variant (fsqrtd) but was annotated with the single-precision
// divide/sqrt pipeline class (swapped with sqrtF below).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
14251 
// Single-precision square root. The ideal graph expresses float sqrt as
// ConvD2F(SqrtD(ConvF2D src)); matching the whole shape lets us emit one
// fsqrts instead of widen/sqrt/narrow.
// Pipe class corrected from fp_div_d to fp_div_s: this is the single
// variant (fsqrts) but was annotated with the double-precision
// divide/sqrt pipeline class (swapped with sqrtD above).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
14264 
// Math.rint, floor, ceil — RoundDoubleMode carries the rounding mode as
// an immediate constant; pick the matching FRINT* variant:
//   rint  -> frintn (round to nearest, ties to even)
//   floor -> frintm (round toward minus infinity)
//   ceil  -> frintp (round toward plus infinity)
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // Any other constant means a malformed RoundDoubleMode node;
        // previously this fell through and silently emitted nothing.
        ShouldNotReachHere();
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
14287 
14288 // ============================================================================
14289 // Logical Instructions
14290 
14291 // Integer Logical Instructions
14292 
// And Instructions


// Bitwise AND of two int registers.
// NOTE(review): cr is declared but no effect(KILL cr) is listed and the
// encoding emits the non-flag-setting andw — presumably vestigial;
// confirm before relying on flags being preserved.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14310 
// Bitwise AND of an int register with a logical immediate (immILog
// restricts the constant to encodable bitmask immediates).
// Format fixed from "andsw" to "andw": the encoding emits the
// non-flag-setting andw, not the flag-setting ANDS form.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14325 
// Or Instructions

// Bitwise OR of two int registers.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of an int register with an encodable logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Bitwise XOR of two int registers.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of an int register with an encodable logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14389 
// Long Logical Instructions
// TODO

// Bitwise AND of two long registers.
// Format comment fixed from "# int" to "# long" (copy-paste from the
// int variant). NOTE(review): cr is declared with no effect listed —
// presumably vestigial; confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14407 
// Bitwise AND of a long register with an encodable logical immediate.
// Format comment fixed from "# int" to "# long".
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14422 
// Or Instructions

// Bitwise OR of two long registers.
// Format comment fixed from "# int" to "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of a long register with an encodable logical immediate.
// Format comment fixed from "# int" to "# long".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14454 
// Xor Instructions

// Bitwise XOR of two long registers.
// Format comment fixed from "# int" to "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of a long register with an encodable logical immediate.
// Format comment fixed from "# int" to "# long"; format/ins_cost order
// normalized to match the sibling instructs.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14486 
// int -> long sign extension. sbfm(dst, src, 0, 31) is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (long)src & 0xFFFFFFFF folds into a single
// zero-extension; ubfm(dst, src, 0, 31) is the UXTW alias.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// int -> boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero and
// conditional set. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != null) ? 1 : 0; 64-bit compare form
// of convI2B above. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14561 
// double -> float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, rounding toward zero (the 'z' in fcvtzs).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, rounding toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float (signed convert).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float (signed convert).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, rounding toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, rounding toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double (signed convert).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double (signed convert).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14691 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These Move* instructs reinterpret the raw bits between the integer and
// floating-point register files (or a stack slot); no value conversion
// is performed.

// Load 32 raw bits from a float stack slot into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load 32 raw bits from an int stack slot into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load 64 raw bits from a double stack slot into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load 64 raw bits from a long stack slot into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store 32 raw bits from a float register into an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store 32 raw bits from an int register into a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14801 
// Store 64 raw bits from a double register into a long stack slot.
// Format operand order fixed from "$dst, $src" to "$src, $dst" to match
// the emitted store (value first, slot second) and the sibling
// *_reg_stack instructs.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14819 
// Store 64 raw bits from a long register into a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Move 32 raw bits: float register -> int register (fmov, no memory).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Move 32 raw bits: int register -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Move 64 raw bits: double register -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Move 64 raw bits: long register -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14909 
14910 // ============================================================================
14911 // clearing of an array
14912 
14913 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14914 %{
14915   match(Set dummy (ClearArray cnt base));
14916   effect(USE_KILL cnt, USE_KILL base);
14917 
14918   ins_cost(4 * INSN_COST);
14919   format %{ "ClearArray $cnt, $base" %}
14920 
14921   ins_encode %{
14922     __ zero_words($base$$Register, $cnt$$Register);
14923   %}
14924 
14925   ins_pipe(pipe_class_memory);
14926 %}
14927 
14928 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14929 %{
14930   predicate((u_int64_t)n->in(2)->get_long()
14931             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
14932   match(Set dummy (ClearArray cnt base));
14933   effect(USE_KILL base);
14934 
14935   ins_cost(4 * INSN_COST);
14936   format %{ "ClearArray $cnt, $base" %}
14937 
14938   ins_encode %{
14939     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
14940   %}
14941 
14942   ins_pipe(pipe_class_memory);
14943 %}
14944 
14945 // ============================================================================
14946 // Overflow Math Instructions
14947 
14948 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14949 %{
14950   match(Set cr (OverflowAddI op1 op2));
14951 
14952   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14953   ins_cost(INSN_COST);
14954   ins_encode %{
14955     __ cmnw($op1$$Register, $op2$$Register);
14956   %}
14957 
14958   ins_pipe(icmp_reg_reg);
14959 %}
14960 
14961 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14962 %{
14963   match(Set cr (OverflowAddI op1 op2));
14964 
14965   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14966   ins_cost(INSN_COST);
14967   ins_encode %{
14968     __ cmnw($op1$$Register, $op2$$constant);
14969   %}
14970 
14971   ins_pipe(icmp_reg_imm);
14972 %}
14973 
14974 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14975 %{
14976   match(Set cr (OverflowAddL op1 op2));
14977 
14978   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14979   ins_cost(INSN_COST);
14980   ins_encode %{
14981     __ cmn($op1$$Register, $op2$$Register);
14982   %}
14983 
14984   ins_pipe(icmp_reg_reg);
14985 %}
14986 
14987 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14988 %{
14989   match(Set cr (OverflowAddL op1 op2));
14990 
14991   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14992   ins_cost(INSN_COST);
14993   ins_encode %{
14994     __ cmn($op1$$Register, $op2$$constant);
14995   %}
14996 
14997   ins_pipe(icmp_reg_imm);
14998 %}
14999 
// Subtract-overflow check (int): cmp sets flags from op1 - op2; V is
// set iff the signed subtract overflows.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Subtract-overflow check (int, immediate subtrahend).
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Subtract-overflow check (long).
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Subtract-overflow check (long, immediate subtrahend).
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // subs with zr as destination is the flag-setting compare form.
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
15051 
// Negate-overflow check (int): negation is tested as 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Negate-overflow check (long): negation is tested as 0 - op1.
// NOTE(review): the zero operand is immI0 in a long (OverflowSubL) rule
// — looks like it should be immL0; confirm against ADLC operand typing.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
15077 
// Multiply-overflow check (int). smull produces the full 64-bit product;
// it overflows 32 bits iff the product differs from its own sign-extended
// low word (the subs/sxtw compare). The movw/cselw/cmpw tail then
// translates that NE/EQ result into the V flag, which is what the
// OverflowMulI consumers test.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for If(OverflowMulI): when the only consumer is an
// overflow/no_overflow branch we can skip materializing V and branch
// directly on the NE/EQ result of the sign-extension compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Multiply-overflow check (long). mul/smulh give the 128-bit product;
// it fits in 64 bits iff the high half equals the sign extension of the
// low half (the cmp with ASR #63). The tail materializes V as above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for If(OverflowMulL); see overflowMulI_reg_branch.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15167 
15168 // ============================================================================
15169 // Compare Instructions
15170 
// Signed int compare, register-register: sets flags via 32-bit cmpw.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against the constant zero, using the add/sub
// immediate encoding of cmpw.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate that fits the arithmetic
// (add/sub) immediate encoding — a single cmpw instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costed at two
// instructions since the constant may need materializing first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15226 
15227 // Unsigned compare Instructions; really, same as signed compare
15228 // except it should only be used to feed an If or a CMovI which takes a
15229 // cmpOpU.
15230 
// Unsigned int compare, register-register.  Same cmpw encoding as the
// signed form; only the flags register class (rFlagsRegU) differs so
// that consumers must use unsigned conditions (cmpOpU).
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may need a
// materializing move, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15286 
// Signed long compare, register-register (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against the constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (may need a
// materializing move, hence the doubled cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15342 
// Unsigned long compare, register-register.  Same encodings as the
// signed CmpL forms; rFlagsRegU forces unsigned condition consumers.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against the constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate (may need a
// materializing move, hence the doubled cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15398 
// Pointer compare, register-register; pointers compare unsigned, hence
// the rFlagsRegU result class.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare a pointer register against zero.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test: compare a narrow pointer against zero.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15454 
15455 // FP comparisons
15456 //
15457 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15458 // using normal cmpOp. See declaration of rFlagsReg for details.
15459 
// Single-precision float compare, register-register (fcmps sets the
// integer condition flags; see the note above on rFlagsReg usage).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Single-precision float compare against the constant 0.0 (uses the
// fcmp-with-zero form, no second register needed).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15487 // FROM HERE
15488 
// Double-precision float compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision float compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15516 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or the
// operands are unordered, 0 if equal, +1 if greater — built from one
// fcmps plus two conditional-select instructions.
// Changes from the previous version: removed the dead `Label done`
// (bound but never branched to, so it emitted nothing) and fixed the
// format string, which rendered the csinvw/csnegw lines as unbalanced
// function-call syntax instead of assembly.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15544 
// Three-way double compare (CmpD3): dst = -1 if src1 < src2 or the
// operands are unordered, 0 if equal, +1 if greater.
// Changes from the previous version: removed the dead `Label done`
// (bound but never branched to) and fixed the format string to read as
// assembly rather than unbalanced function-call syntax.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15571 
// Three-way float compare against the constant 0.0 (CmpF3 with a zero
// operand): dst = -1 if src1 < 0.0 or unordered, 0 if equal, +1 if
// greater.  Uses the fcmp-with-zero form so no second FP register is
// needed.
// Changes from the previous version: removed the dead `Label done`
// (bound but never branched to) and fixed the format string to read as
// assembly rather than unbalanced function-call syntax.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15598 
// Three-way double compare against the constant 0.0: dst = -1 if
// src1 < 0.0 or unordered, 0 if equal, +1 if greater.
// Changes from the previous version: removed the dead `Label done`
// (bound but never branched to) and fixed the format string to read as
// assembly rather than unbalanced function-call syntax.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15624 
// CmpLTMask: dst = (p < q) ? -1 : 0 (all-ones mask when less).
// cset produces 0/1 from the LT condition; the subw from zr negates it
// to 0/-1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: the mask is just the sign bit replicated, so
// a single arithmetic shift right by 31 suffices (no flags needed; cr
// is still listed as killed to match the general form's effect).
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15661 
15662 // ============================================================================
15663 // Max and Min
15664 
// Helper (no match rule; used only from the expand blocks below):
// conditional select of src1 when LT else src2.
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MinI: expands to a signed compare followed by csel on LT, which
// picks src1 exactly when src1 < src2.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
15694 // FROM HERE
15695 
// Helper (no match rule; used only from the expand block below):
// conditional select of src1 when GT else src2.
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}

// MaxI: expands to a signed compare followed by csel on GT, which
// picks src1 exactly when src1 > src2.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15723 
15724 // ============================================================================
15725 // Branch Instructions
15726 
15727 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
15741 
15742 // Conditional Near Branch
// Conditional branch on signed condition codes (b.cond).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Same as branchCon but consumes unsigned flags/conditions (cmpOpU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15783 
15784 // Make use of CBZ and CBNZ.  These instructions, as well as being
15785 // shorter than (cmp; branch), have the additional benefit of not
15786 // killing the flags.
15787 
// Fused int compare-with-zero and branch: EQ -> cbzw, NE -> cbnzw.
// Flags are not modified (cr appears only to shadow the generic rule).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused long compare-with-zero and branch: EQ -> cbz, NE -> cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused pointer null-check and branch: EQ -> cbz, NE -> cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused compressed-oop null-check and branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15855 
// Null-check-and-branch of a DecodeN'd pointer: a decoded compressed
// oop is null iff the narrow oop is zero, so test the narrow register
// directly with cbzw/cbnzw and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused unsigned int compare-with-zero and branch.  Against zero,
// EQ and LS ("unsigned lower or same") both mean op1 == 0 -> cbzw;
// the remaining conditions mean op1 != 0 -> cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Fused unsigned long compare-with-zero and branch (64-bit cbz/cbnz).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15906 
15907 // Test bit and Branch
15908 
15909 // Patterns for short (< 32KiB) variants
// Long sign test + branch: x < 0 / x >= 0 against zero only needs the
// sign bit, so emit a single tbnz/tbz on bit 63 (LT -> NE i.e. tbnz,
// GE -> EQ i.e. tbz).  Short (< 32KiB) variant.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test + branch: same idea as above on bit 31.  Short variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Long single-bit test + branch: (op1 & pow2mask) ==/!= 0 becomes
// tbz/tbnz on the mask's bit index.  Short variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int single-bit test + branch, as above for 32-bit values.  Short
// variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15975 
15976 // And far variants
// Far variant of the long sign test + branch (passes far=true so the
// assembler can synthesize an out-of-range branch); no
// ins_short_branch, unlike the short patterns above.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the int sign test + branch (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the long single-bit test + branch.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the int single-bit test + branch.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
16038 
16039 // Test bits
16040 
// Flag-setting long AND-with-constant test (CmpL (AndL op1 op2) 0),
// valid only when the mask encodes as a 64-bit logical immediate;
// emits a single tst, writing no result register.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16053 
// Flag-setting int AND-with-constant test (CmpI (AndI op1 op2) 0),
// valid only when the mask encodes as a 32-bit logical immediate;
// emits a single tstw, writing no result register.
// Fix: format said "tst" while the encoding emits tstw — corrected to
// match the emitted instruction and the register-register sibling.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
16066 
// Flag-setting long AND test, register-register form (tst).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Flag-setting int AND test, register-register form (tstw).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
16088 
16089 
16090 // Conditional Far Branch
16091 // Conditional Far Branch Unsigned
16092 // TODO: fixme
16093 
16094 // counted loop end branch near
// Counted loop back-branch, signed conditions; same encoding as
// branchCon but matches CountedLoopEnd.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Counted loop back-branch, unsigned conditions.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16127 
16128 // counted loop end branch far
16129 // counted loop end branch far unsigned
16130 // TODO: fixme
16131 
16132 // ============================================================================
16133 // inlined locking and unlocking
16134 
// Inlined monitor enter: flags are set by the aarch64_enc_fast_lock
// stub sequence; tmp/tmp2 are scratch (TEMP) and are clobbered.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit, mirroring cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
16162 
16163 
16164 // ============================================================================
16165 // Safepoint Instructions
16166 
16167 // TODO
16168 // provide a near and far version of this code
16169 
// Safepoint poll: a load from the polling page (discarded into zr via
// read_polling_page) that faults when the VM arms the page, with
// relocInfo::poll_type so the signal handler can identify the poll.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16183 
16184 
16185 // ============================================================================
16186 // Procedure Call/Return Instructions
16187 
// Call Java Static Instruction
// Direct (statically-bound) Java call; the call epilog encoding is
// appended after the call itself.

instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
16205 
16206 // TO HERE
16207 
// Call Java Dynamic Instruction
// Dynamically-bound (inline-cache) Java call; epilog appended as for
// the static variant.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf Instruction

instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf (no FP) Instruction

instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16275 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding: the exception oop is pinned
// to r0 (iRegP_R0) for the handler being jumped to.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16305 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size node: it only tells the register allocator that the
  // exception oop lives in r0 at this point.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16336 
16337 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
16350 
16351 // Die now.
// Halt node: emit a trapping instruction so execution can never fall
// through this point.  DCPS1 (Debug Change PE State) raises a trap;
// the immediate makes the faulting site identifiable in a debugger.
// NOTE: the assembler method is dcps1 (the original read "dpcs1",
// which is not an A64 mnemonic and would not assemble).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dcps1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
16366 
16367 // ============================================================================
16368 // Partial Subtype Check
16369 //
// Search sub's secondary supers array for an instance of the
// superklass.  Set a hidden
16371 // internal cache on a hit (cache is checked with exposed code in
16372 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16373 // encoding ALSO sets flags.
16374 
// Registers are pinned (r4/r0/r2/r5) to match the calling convention
// expected by the partial-subtype-check encoding/stub — TODO confirm
// against aarch64_enc_partial_subtype_check.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when the check result is only compared against zero:
// the flags (cr) are the real output, so the result register need not
// be zeroed on a hit (opcode 0x0 below).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16404 
// String comparison, UTF-16 vs UTF-16 (StrIntrinsicNode::UU).
// NOTE(review): the format text lists only $tmp1 as killed, but the
// effect() also kills tmp2, both count registers and cr.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// String comparison, Latin-1 vs Latin-1 (StrIntrinsicNode::LL).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding comparison (UTF-16 vs Latin-1): additionally needs
// three vector temporaries (V0-V2), which the all-same-width variants
// above pass as fnoreg.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding comparison (Latin-1 vs UTF-16); mirror of the UL
// variant above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16479 
// String.indexOf with a variable-length needle, UTF-16 haystack and
// needle (UU).  The -1 literal tells string_indexof the needle length
// is not a compile-time constant (cnt2 register is used instead).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above, Latin-1 haystack and needle (LL).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// As above, UTF-16 haystack with Latin-1 needle (UL).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16542 
// String.indexOf with a constant-length needle (UU).  The immI_le_4
// operand restricts this pattern to needle lengths <= 4; the constant
// is passed to string_indexof as icnt2 and fewer temps are needed.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length needle (<= 4), Latin-1 (LL).
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length needle, mixed encoding (UL).  Note the operand is
// immI_1 — only a single-element needle is handled by this pattern,
// unlike the UU/LL variants above which allow lengths up to 4.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16605 
// Single-character indexOf over a UTF-16 string ($ch is the code unit
// searched for); delegates to the string_indexof_char assembler stub.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16623 
// String equality, Latin-1 (LL): final argument 1 is the element size
// in bytes passed to string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// String equality, UTF-16 (UU): element size 2.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16655 
// byte[] equality (LL encoding): element size 1 in the trailing
// argument to arrays_equals.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}

// char[] equality (UU encoding): element size 2.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}

// HasNegatives intrinsic: scans a byte array for any negative byte;
// result written to r0 by the has_negatives stub.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16700 
// fast char[] to byte[] compression
// NOTE(review): the format text says "KILL R1, R2, R3, R4" but the
// effect() kills src (R2), dst (R1), len (R3), cr and the vector
// temporaries V0-V3 — no integer R4 appears; verify the comment.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16734 
// encode char[] to byte[] in ISO_8859_1
// result receives the number of characters encoded (written by the
// encode_iso_array stub into r0).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16753 
16754 // ============================================================================
16755 // This name is KNOWN by the ADLC and cannot be changed.
16756 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16757 // for this guy.
16758 instruct tlsLoadP(thread_RegP dst)
16759 %{
16760   match(Set dst (ThreadLocal));
16761 
16762   ins_cost(0);
16763 
16764   format %{ " -- \t// $dst=Thread::current(), empty" %}
16765 
16766   size(0);
16767 
16768   ins_encode( /*empty*/ );
16769 
16770   ins_pipe(pipe_class_empty);
16771 %}
16772 
// ====================VECTOR INSTRUCTIONS=====================================

// 8-byte -> 8-byte reinterpret: a pure type change, no code emitted
// (dst is matched in place).
instruct reinterpretD(vecD dst) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}

// 8-byte -> 16-byte reinterpret: copies the low 64 bits via ORR
// (register move) when src and dst differ.
instruct reinterpretD2X(vecX dst, vecD src) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}

// 16-byte -> 8-byte reinterpret: only the low 64 bits are kept.
instruct reinterpretX2D(vecD dst, vecX src) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    // If register is the same, then move is not needed.
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}

// 16-byte -> 16-byte reinterpret: pure type change, no code emitted.
instruct reinterpretX(vecX dst) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}
16831 
// Load vector (16 bits)
// NOTE(review): this variant uses the generic `memory` operand while
// the 32/64/128-bit loads below use size-specific vmem4/vmem8/vmem16
// operands — presumably an addressing-mode restriction; verify.
instruct loadV2(vecD dst, memory mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 2);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrh   $dst,$mem\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_ldrvH(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
16875 
// Store Vector (16 bits)
// Uses the generic `memory` operand, matching loadV2 above.
instruct storeV2(vecD src, memory mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 2);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strh   $mem,$src\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_strvH(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16919 
16920 instruct replicate8B(vecD dst, iRegIorL2I src)
16921 %{
16922   predicate(n->as_Vector()->length() == 4 ||
16923             n->as_Vector()->length() == 8);
16924   match(Set dst (ReplicateB src));
16925   ins_cost(INSN_COST);
16926   format %{ "dup  $dst, $src\t# vector (8B)" %}
16927   ins_encode %{
16928     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
16929   %}
16930   ins_pipe(vdup_reg_reg64);
16931 %}
16932 
16933 instruct replicate16B(vecX dst, iRegIorL2I src)
16934 %{
16935   predicate(n->as_Vector()->length() == 16);
16936   match(Set dst (ReplicateB src));
16937   ins_cost(INSN_COST);
16938   format %{ "dup  $dst, $src\t# vector (16B)" %}
16939   ins_encode %{
16940     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
16941   %}
16942   ins_pipe(vdup_reg_reg128);
16943 %}
16944 
16945 instruct replicate8B_imm(vecD dst, immI con)
16946 %{
16947   predicate(n->as_Vector()->length() == 4 ||
16948             n->as_Vector()->length() == 8);
16949   match(Set dst (ReplicateB con));
16950   ins_cost(INSN_COST);
16951   format %{ "movi  $dst, $con\t# vector(8B)" %}
16952   ins_encode %{
16953     __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
16954   %}
16955   ins_pipe(vmovi_reg_imm64);
16956 %}
16957 
16958 instruct replicate16B_imm(vecX dst, immI con)
16959 %{
16960   predicate(n->as_Vector()->length() == 16);
16961   match(Set dst (ReplicateB con));
16962   ins_cost(INSN_COST);
16963   format %{ "movi  $dst, $con\t# vector(16B)" %}
16964   ins_encode %{
16965     __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
16966   %}
16967   ins_pipe(vmovi_reg_imm128);
16968 %}
16969 
16970 instruct replicate4S(vecD dst, iRegIorL2I src)
16971 %{
16972   predicate(n->as_Vector()->length() == 2 ||
16973             n->as_Vector()->length() == 4);
16974   match(Set dst (ReplicateS src));
16975   ins_cost(INSN_COST);
16976   format %{ "dup  $dst, $src\t# vector (4S)" %}
16977   ins_encode %{
16978     __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
16979   %}
16980   ins_pipe(vdup_reg_reg64);
16981 %}
16982 
// Broadcast the low halfword of a 32-bit GP register into all 8
// 16-bit lanes of a 128-bit vector (dup Vd.8H, Wn).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16994 
// Broadcast an immediate short constant into the 16-bit lanes of a
// 64-bit vector. Also serves 2-element short vectors.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Only the low 16 bits of the constant are meaningful in a halfword lane.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
17007 
// Broadcast an immediate short constant into all 8 16-bit lanes of a
// 128-bit vector register.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    // Only the low 16 bits of the constant are meaningful in a halfword lane.
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17019 
// Broadcast a 32-bit GP register into both 32-bit lanes of a 64-bit
// vector (dup Vd.2S, Wn).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
17031 
// Broadcast a 32-bit GP register into all 4 32-bit lanes of a 128-bit
// vector (dup Vd.4S, Wn).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17043 
// Broadcast an immediate int constant into both 32-bit lanes of a
// 64-bit vector register.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
17055 
// Broadcast an immediate int constant into all 4 32-bit lanes of a
// 128-bit vector register.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17067 
// Broadcast a 64-bit GP register into both 64-bit lanes of a 128-bit
// vector (dup Vd.2D, Xn).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17079 
// Zero a 128-bit vector register. Matches ReplicateI of the zero
// constant (the all-zeros bit pattern is identical whether viewed as
// 2L, 4I, 8S or 16B lanes). Encoded as a self-EOR rather than a movi,
// which needs no immediate materialization.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // Format reflects the actual encoding (eor), not a movi.
  format %{ "eor  $dst, T16B, $dst, $dst\t# vector (2L) zero" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
17093 
// Broadcast lane 0 of an FP register into both 32-bit lanes of a
// 64-bit vector (dup Vd.2S, Vn.S[0]).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
17106 
// Broadcast lane 0 of an FP register into all 4 32-bit lanes of a
// 128-bit vector (dup Vd.4S, Vn.S[0]).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
17119 
// Broadcast lane 0 of a double FP register into both 64-bit lanes of a
// 128-bit vector (dup Vd.2D, Vn.D[0]).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
17132 
17133 // ====================REDUCTION ARITHMETIC====================================
17134 
// Add-reduce 8 byte lanes plus a scalar accumulator:
//   addv sums all lanes across the vector, smov sign-extends lane 0
//   out to a GP register, addw folds in the scalar, and the final sxtb
//   re-narrows the result to byte range as the Java semantics require.
instruct reduce_add8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxtb  $dst, $dst\t add reduction8B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17154 
// Add-reduce 16 byte lanes plus a scalar accumulator. Same scheme as
// reduce_add8B but over the full 128-bit (T16B) arrangement.
instruct reduce_add16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxtb  $dst, $dst\t add reduction16B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    // Re-narrow to byte range after the scalar add.
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17174 
// Add-reduce 4 short lanes plus a scalar accumulator: addv across T4H,
// signed extract of lane 0, scalar addw, then sxth to re-narrow to
// short range.
instruct reduce_add4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxth  $dst, $dst\t add reduction4S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17194 
// Add-reduce 8 short lanes plus a scalar accumulator. Same scheme as
// reduce_add4S but over the full 128-bit (T8H) arrangement.
instruct reduce_add8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxth  $dst, $dst\t add reduction8S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    // Re-narrow to short range after the scalar add.
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17214 
// Add-reduce 2 int lanes plus a scalar accumulator. With only two
// lanes it is cheaper to extract both with umov and add on the GP side
// than to use a vector reduction.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $tmp, $src1, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($tmp$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17234 
// Add-reduce 4 int lanes plus a scalar accumulator: addv across T4S,
// extract lane 0, then fold in the scalar with addw.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17253 
// Multiply-reduce 8 byte lanes plus a scalar accumulator using
// log2-style folding: each ins copies the upper half of the live data
// next to the lower half so one mulv halves the element count
// (8 -> 4 -> 2). The last two lanes are multiplied on the GP side,
// with sxtb after each scalar multiply to keep byte semantics.
instruct reduce_mul8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD vtmp1, vecD vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, S, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $src2\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t mul reduction8B"
  %}
  ins_encode %{
    // Fold 8 lanes -> 4: move bytes 4..7 down next to bytes 0..3.
    __ ins(as_FloatRegister($vtmp1$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // Fold 4 lanes -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Finish the last two lanes and the scalar on the GP side.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17289 
// Multiply-reduce 16 byte lanes plus a scalar accumulator. Same
// log2-folding scheme as reduce_mul8B with one extra fold step
// (16 -> 8 -> 4 -> 2); after the first D-lane fold only the low 8
// bytes are live, so T8B multiplies suffice throughout.
instruct reduce_mul16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $src2\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp2, $vtmp1\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t mul reduction16B"
  %}
  ins_encode %{
    // Fold 16 lanes -> 8: move the high doubleword down.
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // Fold 8 lanes -> 4.
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Fold 4 lanes -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Finish the last two lanes and the scalar on the GP side.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17331 
// Multiply-reduce 4 short lanes plus a scalar accumulator: one
// log2-fold (4 -> 2) on the vector side, then the last two lanes and
// the scalar are multiplied on the GP side, with sxth after each
// scalar multiply to keep short semantics.
instruct reduce_mul4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp, TEMP itmp);
  format %{ "ins   $vtmp, S, $src2, 0, 1\n\t"
            "mulv  $vtmp, T4H, $vtmp, $src2\n\t"
            "umov  $itmp, $vtmp, H, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t mul reduction4S"
  %}
  ins_encode %{
    // Fold 4 lanes -> 2: move halfwords 2..3 down next to 0..1.
    __ ins(as_FloatRegister($vtmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T4H,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17361 
// Multiply-reduce 8 short lanes plus a scalar accumulator: two
// log2-folds on the vector side (8 -> 4 -> 2), then the last two
// lanes and the scalar finish on the GP side with sxth re-narrowing.
instruct reduce_mul8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T4H, $vtmp1, $src2\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T4H, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, H, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t mul reduction8S"
  %}
  ins_encode %{
    // Fold 8 lanes -> 4: move the high doubleword down.
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T4H,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // Fold 4 lanes -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T4H,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17397 
// Multiply-reduce 2 int lanes plus a scalar accumulator. With only
// two lanes both are extracted with umov and multiplied on the GP
// side; no vector reduction step is needed.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Note: no trailing "\n\t" after the last instruction, matching the
  // sibling reduction formats.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17417 
// Multiply-reduce 4 int lanes plus a scalar accumulator: one
// log2-fold (4 -> 2 via ins D + mulv T2S), then the last two lanes
// and the scalar are multiplied on the GP side.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Note: no trailing "\n\t" after the last instruction, matching the
  // sibling reduction formats.
  format %{ "ins   $tmp, D, $src2, 0, 1\n\t"
            "mulv  $tmp, T2S, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4I"
  %}
  ins_encode %{
    // Fold 4 lanes -> 2: move the high doubleword down.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
            as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17443 
// Add-reduce 2 float lanes plus a scalar accumulator. Adds are done
// strictly in order (scalar + lane0, then + lane1) to preserve the
// FP rounding order the Java semantics require; ins copies lane 1
// down to lane 0 so scalar fadds can reach it.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17463 
// Add-reduce 4 float lanes plus a scalar accumulator. Lanes are added
// strictly in order (scalar, lane0, lane1, lane2, lane3) to preserve
// FP rounding order; each ins moves the next lane down to position 0.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17495 
// Multiply-reduce 2 float lanes plus a scalar accumulator. Multiplies
// are done strictly in order to preserve FP rounding order.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17515 
// Multiply-reduce 4 float lanes plus a scalar accumulator. Lanes are
// multiplied strictly in order to preserve FP rounding order; each
// ins moves the next lane down to position 0.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17547 
// Add-reduce 2 long lanes plus a scalar accumulator: addpd pairwise-
// adds the two D lanes, umov extracts the sum, add folds in the scalar.
instruct reduce_add2L(iRegLNoSp dst, iRegL src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addpd $tmp, $src2\n\t"
            "umov  $dst, $tmp, D, 0\n\t"
            "add   $dst, $src1, $dst\t add reduction2L"
  %}
  ins_encode %{
    __ addpd(as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ D, 0);
    __ add($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17564 
// Multiply-reduce 2 long lanes plus a scalar accumulator. There is no
// 64-bit lane vector multiply, so both lanes are extracted with umov
// and multiplied on the GP side.
instruct reduce_mul2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  match(Set dst (MulReductionVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "mul   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "mul   $dst, $dst, $tmp\t mul reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ mul($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ mul($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
17583 
// Add-reduce 2 double lanes plus a scalar accumulator. Adds are done
// strictly in order (scalar + lane0, then + lane1) to preserve FP
// rounding order.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17603 
// Multiply-reduce 2 double lanes plus a scalar accumulator. Multiplies
// are done strictly in order to preserve FP rounding order.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17623 
// Max-reduce 8 byte lanes against a scalar: smaxv finds the signed
// lane maximum, smov sign-extends it out, then cmpw/cselw pick the
// larger of that and the scalar accumulator. Clobbers the flags.
instruct reduce_max8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction8B" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17641 
// Max-reduce 16 byte lanes against a scalar. Same scheme as
// reduce_max8B but over the full 128-bit (T16B) arrangement.
// Clobbers the flags.
instruct reduce_max16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction16B" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17659 
// Max-reduce 4 short lanes against a scalar: smaxv over T4H, signed
// extract, then cmpw/cselw against the scalar accumulator.
// Clobbers the flags.
instruct reduce_max4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction4S" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17677 
// Max-reduce 8 short lanes against a scalar: smaxv over T8H, signed
// extract, then cmpw/cselw against the scalar accumulator.
// Clobbers the flags.
instruct reduce_max8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  // Single space after "smaxv" for consistency with the sibling
  // reduce_max formats.
  format %{ "smaxv $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction8S" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17695 
// Max-reduce 2 int lanes against a scalar. smaxv has no 2S form, so
// dup T2D first duplicates the 64-bit payload into both halves of a
// 128-bit temp; smaxv over T4S then sees each lane twice, which does
// not change the maximum. Clobbers the flags.
instruct reduce_max2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $src2\n\t"
            "smaxv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction2I" %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($src2$$reg));
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17715 
// Signed max reduction over 4 int lanes (128-bit vector) merged with a
// scalar accumulator via smaxv + umov + compare/select.
instruct reduce_max4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4S, $src2\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction4I" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17733 
// Signed max reduction over 2 long lanes. There is no 64-bit smaxv, so each
// D lane is moved to a GPR (umov) and folded in with scalar compare/select:
// first against src1, then against the running max in dst.
instruct reduce_max2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "cmp   $src1,$tmp\n\t"
            "csel  $dst, $src1, $tmp gt\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp gt\t max reduction2L" %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ cmp(as_Register($src1$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($tmp$$reg), Assembler::GT);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
17755 
// Float max reduction over 2 lanes: fmaxs merges lane 0 with scalar src1,
// ins copies lane 1 of src2 into tmp's lane 0, then a second fmaxs folds it
// in. fmaxs gives IEEE max semantics (NaN propagation, -0.0 < +0.0).
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17771 
// Float max reduction over 4 lanes: fmaxv folds the vector to one lane,
// then fmaxs merges the scalar accumulator src1.
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17785 
// Double max reduction over 2 lanes (no fmaxv for 2D): fmaxd merges lane 0
// with src1, ins moves lane 1 of src2 into tmp, then a second fmaxd folds it.
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17801 
// Signed min reduction over 8 byte lanes (64-bit vector) merged with a
// scalar: sminv folds src2, smov sign-extends lane 0, cmpw/cselw (LT)
// keeps the smaller of dst and src1.
instruct reduce_min8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction8B" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17819 
// Signed min reduction over 16 byte lanes (128-bit vector) merged with a
// scalar accumulator; same sminv/smov/cmpw/cselw shape as reduce_min8B.
instruct reduce_min16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction16B" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17837 
// Signed min reduction over 4 short lanes (64-bit vector) merged with a
// scalar accumulator via sminv + smov + compare/select (LT).
instruct reduce_min4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction4S" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17855 
// Signed min reduction over 8 short lanes (128-bit vector) merged with a
// scalar accumulator via sminv + smov + compare/select (LT).
instruct reduce_min8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction8S" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17873 
// Signed min reduction over a 2-lane int vector (64-bit src2) merged with a
// scalar: dup replicates the D register into both halves of a 128-bit tmp so
// the 4S sminv sees each element twice (duplicates don't change the min),
// then cmpw/cselw folds in scalar src1.
instruct reduce_min2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  // Fixed: format previously said "sminv $tmp, T2S" but the encoding emits a
  // T4S sminv (and must, after the T2D dup); keep them in agreement, matching
  // the parallel reduce_max2I rule.
  format %{ "dup   $tmp, T2D, $src2\n\t"
            "sminv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction2I" %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($src2$$reg));
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17893 
// Signed min reduction over 4 int lanes (128-bit vector) merged with a
// scalar accumulator via sminv + umov + compare/select (LT).
instruct reduce_min4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4S, $src2\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction4I" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17911 
// Signed min reduction over 2 long lanes. There is no 64-bit sminv, so each
// D lane is moved to a GPR (umov) and folded in with scalar compare/select:
// first against src1, then against the running min in dst.
instruct reduce_min2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "cmp   $src1,$tmp\n\t"
            "csel  $dst, $src1, $tmp lt\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp lt\t min reduction2L" %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ cmp(as_Register($src1$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($tmp$$reg), Assembler::LT);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
17933 
// Float min reduction over 2 lanes: fmins merges lane 0 with scalar src1,
// ins copies lane 1 of src2 into tmp's lane 0, then a second fmins folds it
// in. fmins gives IEEE min semantics (NaN propagation, -0.0 < +0.0).
instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17949 
// Float min reduction over 4 lanes: fminv folds the vector to one lane,
// then fmins merges the scalar accumulator src1.
instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $src2\n\t"
            "fmins $dst, $dst, $src1\t min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17963 
// Double min reduction over 2 lanes (no fminv for 2D): fmind merges lane 0
// with src1, ins moves lane 1 of src2 into tmp, then a second fmind folds it.
instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
17979 
17980 // ====================VECTOR ARITHMETIC=======================================
17981 
17982 // --------------------------------- ADD --------------------------------------
17983 
// Vector byte add in a 64-bit D register; also covers 4-byte vectors
// (predicate accepts length 4 or 8 — the extra lanes are simply unused).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
17998 
// Vector add of 16 byte lanes in a 128-bit Q register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18012 
// Vector short add in a 64-bit D register; covers 2- and 4-lane vectors.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
18027 
// Vector add of 8 short lanes in a 128-bit Q register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18041 
// Vector add of 2 int lanes in a 64-bit D register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
18055 
// Vector add of 4 int lanes in a 128-bit Q register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18069 
// Vector add of 2 long lanes in a 128-bit Q register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18083 
// Vector FP add of 2 float lanes in a 64-bit D register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18097 
// Vector FP add of 4 float lanes in a 128-bit Q register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18111 
// Vector FP add of 2 double lanes in a 128-bit Q register.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Guard on vector length like the sibling 2D rules (vsub2D, vmul2D);
  // this predicate was missing, leaving AddVD matched unconditionally.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18124 
18125 // --------------------------------- SUB --------------------------------------
18126 
// Vector byte subtract in a 64-bit D register; covers 4- and 8-lane vectors.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
18141 
// Vector subtract of 16 byte lanes in a 128-bit Q register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18155 
// Vector short subtract in a 64-bit D register; covers 2- and 4-lane vectors.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
18170 
// Vector subtract of 8 short lanes in a 128-bit Q register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18184 
// Vector subtract of 2 int lanes in a 64-bit D register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
18198 
// Vector subtract of 4 int lanes in a 128-bit Q register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18212 
// Vector subtract of 2 long lanes in a 128-bit Q register.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18226 
// Vector FP subtract of 2 float lanes in a 64-bit D register.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18240 
// Vector FP subtract of 4 float lanes in a 128-bit Q register.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18254 
// Vector FP subtract of 2 double lanes in a 128-bit Q register.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18268 
18269 // --------------------------------- MUL --------------------------------------
18270 
// Vector byte multiply in a 64-bit D register; covers 4- and 8-lane vectors.
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
18285 
// Vector multiply of 16 byte lanes in a 128-bit Q register.
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
18299 
// Vector short multiply in a 64-bit D register; covers 2- and 4-lane vectors.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
18314 
// Vector multiply of 8 short lanes in a 128-bit Q register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
18328 
// Vector multiply of 2 int lanes in a 64-bit D register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
18342 
// Vector multiply of 4 int lanes in a 128-bit Q register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
18356 
// Vector multiply of 2 long lanes. NEON has no 64x64->64 lane multiply, so
// each lane is extracted to GPRs (umov), multiplied with the scalar mul,
// and inserted back into dst (mov).
instruct vmul2L(vecX dst, vecX src1, vecX src2, iRegLNoSp tmp1, iRegLNoSp tmp2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp1, TEMP tmp2);
  // Dropped the stray trailing "\n\t" after the last insn so the format
  // ends cleanly like every other instruct.
  format %{ "umov   $tmp1, $src1, D, 0\n\t"
            "umov   $tmp2, $src2, D, 0\n\t"
            "mul    $tmp2, $tmp2, $tmp1\n\t"
            "mov    $dst,  T2D,   0, $tmp2\t# insert into vector(2L)\n\t"
            "umov   $tmp1, $src1, D, 1\n\t"
            "umov   $tmp2, $src2, D, 1\n\t"
            "mul    $tmp2, $tmp2, $tmp1\n\t"
            "mov    $dst,  T2D,   1, $tmp2\t# insert into vector(2L)"
  %}
  ins_encode %{
    __ umov($tmp1$$Register, as_FloatRegister($src1$$reg), __ D, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ mul(as_Register($tmp2$$reg), as_Register($tmp2$$reg), as_Register($tmp1$$reg));
    __ mov(as_FloatRegister($dst$$reg), __ T2D, 0, $tmp2$$Register);
    __ umov($tmp1$$Register, as_FloatRegister($src1$$reg), __ D, 1);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ mul(as_Register($tmp2$$reg), as_Register($tmp2$$reg), as_Register($tmp1$$reg));
    __ mov(as_FloatRegister($dst$$reg), __ T2D, 1, $tmp2$$Register);
  %}
  ins_pipe(pipe_slow);
%}
18384 
// Vector FP multiply of 2 float lanes in a 64-bit D register.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
18398 
// Vector FP multiply of 4 float lanes in a 128-bit Q register.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18412 
// Vector FP multiply of 2 double lanes in a 128-bit Q register.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18426 
18427 // --------------------------------- MLA --------------------------------------
18428 
// Fused integer multiply-accumulate: matches dst += src1 * src2 for short
// lanes in a 64-bit D register (2- or 4-lane vectors); emits one mla.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
18443 
// Integer multiply-accumulate (dst += src1 * src2) for 8 short lanes.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
18457 
// Integer multiply-accumulate (dst += src1 * src2) for 2 int lanes.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
18471 
// Integer multiply-accumulate (dst += src1 * src2) for 4 int lanes.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
18485 
18486 // dst + src1 * src2
// FP fused multiply-add (dst += src1 * src2) for 2 float lanes; matched
// only under UseFMA since fmla performs a single, unrounded fusion.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
18499 
18500 // dst + src1 * src2
18501 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
18502   predicate(UseFMA && n->as_Vector()->length() == 4);
18503   match(Set dst (FmaVF  dst (Binary src1 src2)));
18504   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
18505   ins_cost(INSN_COST);
18506   ins_encode %{
18507     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
18508             as_FloatRegister($src1$$reg),
18509             as_FloatRegister($src2$$reg));
18510   %}
18511   ins_pipe(vmuldiv_fp128);
18512 %}
18513 
18514 // dst + src1 * src2
18515 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
18516   predicate(UseFMA && n->as_Vector()->length() == 2);
18517   match(Set dst (FmaVD  dst (Binary src1 src2)));
18518   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
18519   ins_cost(INSN_COST);
18520   ins_encode %{
18521     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
18522             as_FloatRegister($src1$$reg),
18523             as_FloatRegister($src2$$reg));
18524   %}
18525   ins_pipe(vmuldiv_fp128);
18526 %}
18527 
18528 // --------------------------------- MLS --------------------------------------
18529 
// Multiply-subtract: dst -= src1 * src2 for 2 or 4 x 16-bit shorts
// (64-bit vector; the 4H form also covers the 2-element case).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract: dst -= src1 * src2 for 8 x 16-bit shorts (128-bit vector).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-subtract: dst -= src1 * src2 for 2 x 32-bit ints (64-bit vector).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract: dst -= src1 * src2 for 4 x 32-bit ints (128-bit vector).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
18586 
// dst - src1 * src2
// Fused multiply-subtract, 2 x float. Two match rules cover negation of
// either multiplicand, since -(a)*b == a*(-b).
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// Fused multiply-subtract, 4 x float (128-bit vector).
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// Fused multiply-subtract, 2 x double (128-bit vector).
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18631 
18632 // --------------- Vector Multiply-Add Shorts into Integer --------------------
18633 
// MulAddVS2VI: widening multiply of adjacent short pairs, then pairwise add,
// yielding 4 ints from 8 shorts. dst is TEMP_DEF and tmp is TEMP because both
// are written before the final addpv consumes them.
// NOTE(review): the T4H smullv form appears to widen the lower four shorts and
// the T8H form the upper four (smull2) — confirm against the assembler.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18655 
18656 // --------------------------------- DIV --------------------------------------
18657 
// Vector FP divide: dst = src1 / src2, 2 x float (64-bit vector).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector FP divide: dst = src1 / src2, 4 x float (128-bit vector).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector FP divide: dst = src1 / src2, 2 x double (128-bit vector).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
18699 
18700 // --------------------------------- SQRT -------------------------------------
18701 
// Vector square root, 2 x double (128-bit vector).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

// Vector square root, 2 x float (64-bit vector).
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector square root, 4 x float (128-bit vector).
instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
18735 
18736 // --------------------------------- ABS --------------------------------------
18737 
// Integer absolute value, 8 x byte (64-bit vector).
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Integer absolute value, 16 x byte (128-bit vector).
instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Integer absolute value, 4 x short (64-bit vector).
instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Integer absolute value, 8 x short (128-bit vector).
instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Integer absolute value, 2 x int (64-bit vector).
instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Integer absolute value, 4 x int (128-bit vector).
instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Integer absolute value, 2 x long (128-bit vector).
instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// FP absolute value, 2 x float (64-bit vector).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP absolute value, 4 x float (128-bit vector).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP absolute value, 2 x double (128-bit vector).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
18857 
18858 // --------------------------------- NEG --------------------------------------
18859 
// FP negate, 2 x float (64-bit vector).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP negate, 4 x float (128-bit vector).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP negate, 2 x double (128-bit vector).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
18898 
18899 // --------------------------------- NOT --------------------------------------
18900 
// Bitwise NOT of a 64-bit vector. Predicate keys off byte length because
// the operation is element-size agnostic.
instruct vnot8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (NotV src));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (8B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise NOT of a 128-bit vector.
instruct vnot16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (NotV src));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}
18926 
18927 // --------------------------------- AND --------------------------------------
18928 
// Bitwise AND of 4- or 8-byte vectors (both fit the 8B form).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND of 128-bit vectors.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
18957 
18958 // --------------------------------- OR ---------------------------------------
18959 
// Bitwise OR of 4- or 8-byte vectors (both fit the 8B form).
// Fix: the format string previously said "and" (copy-paste from vand8B)
// although the emitted instruction is orr; continuation arguments are
// re-aligned with the opening parenthesis.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
18974 
// Bitwise OR of 128-bit vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
18988 
18989 // --------------------------------- XOR --------------------------------------
18990 
// Bitwise XOR of 4- or 8-byte vectors (both fit the 8B form).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR of 128-bit vectors.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
19019 
// Vector round-to-integral of 2 x double, selecting the rounding mode from
// the rmode immediate: frintn (round to nearest even), frintm (floor),
// frintp (ceil). The switch has no default; rmode is restricted to these
// three constants by the RoundDoubleModeV node.
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(vdop_fp128);
%}
19042 
19043 // ------------------------------ Max ---------------------------------------
19044 
// Signed element-wise max, 8 x byte (64-bit vector).
instruct vmax8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed element-wise max, 16 x byte (128-bit vector).
instruct vmax16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed element-wise max, 4 x short (64-bit vector).
instruct vmax4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed element-wise max, 8 x short (128-bit vector).
instruct vmax8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed element-wise max, 2 x int (64-bit vector).
instruct vmax2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed element-wise max, 4 x int (128-bit vector).
instruct vmax4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19128 
// Signed element-wise max, 2 x long. SMAX has no 2D form, so synthesize it:
// cmgt builds an all-ones/all-zeros mask per lane (src1 > src2), then bsl
// selects src1 where the mask is set and src2 elsewhere. dst is TEMP because
// it holds the mask while src1/src2 are still live.
// Fix: the two format lines were missing a "\n\t" separator, so the debug
// disassembly printed both pseudo-instructions fused on one line.
instruct vmax2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP dst);
  format %{ "cmgt  $dst,$src1,$src2\t# vector (2D)\n\t"
            "bsl  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19147 
// FP element-wise max, 2 x float (64-bit vector).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// FP element-wise max, 4 x float (128-bit vector).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// FP element-wise max, 2 x double (128-bit vector).
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19189 
19190 // ------------------------------ Min ---------------------------------------
19191 
// Signed element-wise min, 8 x byte (64-bit vector).
instruct vmin8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed element-wise min, 16 x byte (128-bit vector).
instruct vmin16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed element-wise min, 4 x short (64-bit vector).
instruct vmin4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed element-wise min, 8 x short (128-bit vector).
instruct vmin8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed element-wise min, 2 x int (64-bit vector).
instruct vmin2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed element-wise min, 4 x int (128-bit vector).
instruct vmin4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19275 
// Signed element-wise min, 2 x long. SMIN has no 2D form, so synthesize it:
// cmgt builds a per-lane mask (src1 > src2), then bsl selects src2 where the
// mask is set and src1 elsewhere. dst is TEMP because it holds the mask while
// src1/src2 are still live.
// Fix: the two format lines were missing a "\n\t" separator, so the debug
// disassembly printed both pseudo-instructions fused on one line.
instruct vmin2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP dst);
  format %{ "cmgt  $dst,$src1,$src2\t# vector (2D)\n\t"
            "bsl  $dst,$src2,$src1\t# vector (16B)" %}
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg),
           as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
19294 
// FP element-wise min, 2 x float (64-bit vector).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// FP element-wise min, 4 x float (128-bit vector).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// FP element-wise min, 2 x double (128-bit vector).
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19336 
19337 // ------------------------------ Comparison ---------------------------------
19338 
// Vector mask compare equal, 8 x byte: each dst lane becomes all-ones if
// src1 == src2, else all-zeros. Element type is taken from the compare's
// input (n->in(1)), not the mask result.
instruct vcmeq8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector mask compare equal, 16 x byte (128-bit vector).
instruct vcmeq16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector mask compare equal, 4 x short (64-bit vector).
instruct vcmeq4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector mask compare equal, 8 x short (128-bit vector).
instruct vcmeq8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19398 
19399 instruct vcmeq2I(vecD dst, vecD src1, vecD src2)
19400 %{
19401   predicate(n->as_Vector()->length() == 2 &&
19402             n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
19403             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
19404   match(Set dst (VectorMaskCmp src1 src2));
19405   format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (2I)" %}
19406   ins_cost(INSN_COST);
19407   ins_encode %{
19408     __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
19409             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19410   %}
19411   ins_pipe(vdop64);
19412 %}
19413 
19414 instruct vcmeq4I(vecX dst, vecX src1, vecX src2)
19415 %{
19416   predicate(n->as_Vector()->length() == 4 &&
19417             n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
19418             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
19419   match(Set dst (VectorMaskCmp src1 src2));
19420   format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (4I)" %}
19421   ins_cost(INSN_COST);
19422   ins_encode %{
19423     __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
19424             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19425   %}
19426   ins_pipe(vdop128);
19427 %}
19428 
19429 instruct vcmeq2L(vecX dst, vecX src1, vecX src2)
19430 %{
19431   predicate(n->as_Vector()->length() == 2 &&
19432             n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
19433             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
19434   match(Set dst (VectorMaskCmp src1 src2));
19435   format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (2L)" %}
19436   ins_cost(INSN_COST);
19437   ins_encode %{
19438     __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
19439             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19440   %}
19441   ins_pipe(vdop128);
19442 %}
19443 
19444 instruct vcmeq2F(vecD dst, vecD src1, vecD src2)
19445 %{
19446   predicate(n->as_Vector()->length() == 2 &&
19447             n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
19448             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
19449   match(Set dst (VectorMaskCmp src1 src2));
19450   format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (2F)" %}
19451   ins_cost(INSN_COST);
19452   ins_encode %{
19453     __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
19454              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19455   %}
19456   ins_pipe(vdop_fp64);
19457 %}
19458 
19459 instruct vcmeq4F(vecX dst, vecX src1, vecX src2)
19460 %{
19461   predicate(n->as_Vector()->length() == 4 &&
19462             n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
19463             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
19464   match(Set dst (VectorMaskCmp src1 src2));
19465   format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (4F)" %}
19466   ins_cost(INSN_COST);
19467   ins_encode %{
19468     __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
19469              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19470   %}
19471   ins_pipe(vdop_fp128);
19472 %}
19473 
19474 instruct vcmeq2D(vecX dst, vecX src1, vecX src2)
19475 %{
19476   predicate(n->as_Vector()->length() == 2 &&
19477             n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
19478             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
19479   match(Set dst (VectorMaskCmp src1 src2));
19480   format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (2D)" %}
19481   ins_cost(INSN_COST);
19482   ins_encode %{
19483     __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
19484              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19485   %}
19486   ins_pipe(vdop_fp128);
19487 %}
19488 
// VectorMaskCmp (BoolTest::ne): AArch64 has no vector compare-not-equal
// instruction, so the mask is synthesized as CMEQ followed by a bitwise
// NOT of the result.  The NOT arrangement covers the whole mask width
// (T8B for 64-bit vectors, T16B for 128-bit vectors).
instruct vcmne8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (8B)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmne16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (16B)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19522 
19523 instruct vcmne4S(vecD dst, vecD src1, vecD src2)
19524 %{
19525   predicate(n->as_Vector()->length() == 4 &&
19526             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
19527             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
19528   match(Set dst (VectorMaskCmp src1 src2));
19529   format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (4S)"
19530             "not   $dst,$dst\t" %}
19531   ins_cost(INSN_COST);
19532   ins_encode %{
19533     __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
19534             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
19535     __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
19536   %}
19537   ins_pipe(vdop64);
19538 %}
19539 
// Remaining NE variants: CMEQ/FCMEQ followed by NOT of the mask.  The
// NOT arrangement covers the full mask width (T8B for vecD forms, T16B
// for vecX forms); for FP lanes a NaN operand compares unequal under
// FCMEQ, so the inverted mask reports NaN lanes as "not equal".
instruct vcmne8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (8S)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmne2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (2I)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmne4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (4I)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmne2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (2L)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmne2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (2F)"
            "not    $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmne4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (4F)"
            "not    $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmne2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (2D)"
            "not    $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19658 
// VectorMaskCmp (BoolTest::lt): there is no compare-less-than form, so
// "src1 < src2" is encoded as CMGT/FCMGT with the operands swapped
// ("src2 > src1").  Note the src2,src1 order in both the format string
// and the encoding below.
instruct vcmlt8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmlt16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmlt8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmlt4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmlt4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmlt2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19808 
// VectorMaskCmp (BoolTest::le): no compare-less-or-equal form exists, so
// "src1 <= src2" is encoded as CMGE/FCMGE with the operands swapped
// ("src2 >= src1").  Note the src2,src1 order in both the format string
// and the encoding below.
instruct vcmle8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmle4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmle2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19958 
// VectorMaskCmp (BoolTest::gt): direct per-lane signed greater-than via
// CMGT (integer lanes) / FCMGT (float/double lanes), operands in source
// order.
instruct vcmgt8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmgt4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmgt2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
20108 
// VectorMaskCmp (BoolTest::ge) on 8 bytes in a 64-bit vector: signed
// CMGE sets each lane to all-ones when src1 >= src2, else to zero.
instruct vcmge8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20123 
// VectorMaskCmp (BoolTest::ge) on 16 bytes in a 128-bit vector (signed CMGE).
instruct vcmge16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20138 
// VectorMaskCmp (BoolTest::ge) on 4 shorts (Java S lanes map to NEON
// halfword lanes, hence T4H) in a 64-bit vector.
instruct vcmge4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20153 
// VectorMaskCmp (BoolTest::ge) on 8 shorts (T8H halfword lanes) in a
// 128-bit vector.
instruct vcmge8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20168 
// VectorMaskCmp (BoolTest::ge) on 2 ints in a 64-bit vector (signed CMGE).
instruct vcmge2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20183 
// VectorMaskCmp (BoolTest::ge) on 4 ints in a 128-bit vector (signed CMGE).
instruct vcmge4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20198 
// VectorMaskCmp (BoolTest::ge) on 2 longs in a 128-bit vector (signed CMGE).
instruct vcmge2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20213 
// VectorMaskCmp (BoolTest::ge) on 2 floats in a 64-bit vector (FCMGE).
instruct vcmge2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
20228 
// VectorMaskCmp (BoolTest::ge) on 4 floats in a 128-bit vector (FCMGE).
instruct vcmge4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
20243 
// VectorMaskCmp (BoolTest::ge) on 2 doubles in a 128-bit vector (FCMGE).
instruct vcmge2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
20258 
20259 // --------------------------------- blend (bsl) ----------------------------
20260 
// VectorBlend, 64-bit vector. dst enters holding the mask (matched as the
// third input); BSL computes dst = (dst & src2) | (~dst & src1), i.e. mask
// lanes select src2, clear lanes select src1.
instruct vbsl8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst,$src2,$src1\t# vector (8B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical64);
%}
20273 
// VectorBlend, 128-bit vector. Same BSL select as vbsl8B: mask lanes (in
// dst) take src2, clear lanes take src1.
instruct vbsl16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst,$src2,$src1\t# vector (16B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical128);
%}
20286 
// VectorLoadMask: turn a vector of 0/1 booleans into a 0/-1 (all-ones)
// mask by negating each byte lane.
instruct loadmask8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "neg   $dst,$src\t# load mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}
20297 
// VectorLoadMask: 16 boolean bytes -> 16-byte 0/-1 mask via per-lane negate.
instruct loadmask16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "neg   $dst,$src\t# load mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}
20308 
// VectorLoadMask: 4 boolean bytes -> 4-short 0/-1 mask. Zero-extend the
// bytes to halfwords, then negate each halfword lane.
// Fix: the format operands were swapped relative to the encoding
// (it printed "uxtl dst,dst" / "neg dst,src"); the code emits
// uxtl dst,src followed by neg dst,dst.
instruct loadmask4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t"
            "neg   $dst,$dst\t# load mask (4B to 4S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
20321 
// VectorLoadMask: 8 boolean bytes -> 8-short 0/-1 mask (uxtl then negate).
// Fix: format operands were swapped relative to the encoding; the code
// emits uxtl dst,src followed by neg dst,dst.
instruct loadmask8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t"
            "neg   $dst,$dst\t# load mask (8B to 8S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20334 
// VectorLoadMask: 2 boolean bytes -> 2-int (or 2-float-lane) 0/-1 mask.
// Widen twice (B->H, H->S) then negate.
instruct loadmask2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t# 2B to 2S"
            "uxtl  $dst,$dst\n\t# 2S to 2I"
            "neg   $dst,$dst\t# load mask (2B to 2I)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20351 
// VectorLoadMask: 4 boolean bytes -> 4-int (or 4-float-lane) 0/-1 mask.
// Widen twice (B->H, H->S) then negate.
instruct loadmask4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t# 4B to 4S"
            "uxtl  $dst,$dst\n\t# 4S to 4I"
            "neg   $dst,$dst\t# load mask (4B to 4I)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20368 
// VectorLoadMask: 2 boolean bytes -> 2-long (or 2-double-lane) 0/-1 mask.
// Widen three times (B->H, H->S, S->D) then negate.
instruct loadmask2L(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_LONG ||
             n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t# 2B to 2S"
            "uxtl  $dst,$dst\n\t# 2S to 2I"
            "uxtl  $dst,$dst\n\t# 2I to 2L"
            "neg   $dst,$dst\t# load mask (2B to 2L)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ uxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg), __ T2S);
    __ negr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20387 
// VectorStoreMask: turn a 0/-1 byte mask back into 0/1 booleans by
// negating each lane (size==1 means byte elements, no narrowing needed).
instruct storemask8B(vecD dst, vecD src, immI_1 size) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "negr  $dst,$src\t# store mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}
20398 
// VectorStoreMask: 16-byte 0/-1 mask -> 0/1 booleans via per-lane negate.
instruct storemask16B(vecX dst, vecX src, immI_1 size) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "negr  $dst,$src\t# store mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}
20409 
// VectorStoreMask for short elements (size==2): narrow the 4 halfword mask
// lanes to bytes (xtn), then negate to get 0/1 booleans.
instruct storemask4S(vecD dst, vecD src, immI_2 size) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t"
            "neg  $dst,$dst\t# store mask (4S to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
20422 
// VectorStoreMask for short elements (size==2): narrow the 8 halfword mask
// lanes to bytes (xtn), then negate to get 0/1 booleans.
instruct storemask8S(vecD dst, vecX src, immI_2 size) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t"
            "neg  $dst,$dst\t# store mask (8S to 8B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20435 
// VectorStoreMask for int elements (size==4): narrow twice (S->H, H->B),
// then negate to get 0/1 booleans.
instruct storemask2I(vecD dst, vecD src, immI_4 size) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 2I to 2S"
            "xtn  $dst,$dst\n\t# 2S to 2B"
            "neg  $dst,$dst\t# store mask (2I to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
20450 
// VectorStoreMask for int elements (size==4): narrow twice (S->H, H->B),
// then negate to get 0/1 booleans.
instruct storemask4I(vecD dst, vecX src, immI_4 size) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 4I to 4S"
            "xtn  $dst,$dst\n\t# 4S to 4B"
            "neg  $dst,$dst\t# store mask (4I to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20465 
// VectorStoreMask for long elements (size==8): narrow three times
// (D->S, S->H, H->B), then negate to get 0/1 booleans.
instruct storemask2L(vecD dst, vecX src, immI_8 size) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 2L to 2I"
            "xtn  $dst,$dst\n\t# 2I to 2S"
            "xtn  $dst,$dst\n\t# 2S to 2B"
            "neg  $dst,$dst\t# store mask (2L to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
20482 
20483 //-------------------------------- LOAD_IOTA_INDICES----------------------------------
20484 
// VectorLoadConst: load the first 8 bytes of the iota index table
// (0,1,2,...) from the stub constant area into a 64-bit vector register.
instruct loadcon8B(vecD dst, immI0 src) %{
  predicate((n->as_Vector()->length() == 2 || n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8) &&
             n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadConst src));
  ins_cost(INSN_COST);
  format %{ "ldr $dst,CONSTANT_MEMORY\t# load iota indices" %}
  ins_encode %{
    __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices()));
    __ ldrd(as_FloatRegister($dst$$reg), rscratch1);
  %}
  ins_pipe(pipe_class_memory);
%}
20498 
// VectorLoadConst: load 16 bytes of the iota index table (0..15) from the
// stub constant area into a 128-bit vector register.
instruct loadcon16B(vecX dst, immI0 src) %{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadConst src));
  ins_cost(INSN_COST);
  format %{ "ldr $dst,CONSTANT_MEMORY\t# load iota indices" %}
  ins_encode %{
    __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices()));
    __ ldrq(as_FloatRegister($dst$$reg), rscratch1);
  %}
  ins_pipe(pipe_class_memory);
%}
20510 
20511 //-------------------------------- LOAD_SHUFFLE ----------------------------------
20512 
// VectorLoadShuffle for byte elements: indices are already bytes, so this
// is just a register move (orr with itself).
instruct loadshuffle8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "mov  $dst, $src\t# get 8B shuffle" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20525 
// VectorLoadShuffle for 16 byte elements: indices are already bytes, so
// this is just a register move (orr with itself).
instruct loadshuffle16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "mov  $dst, $src\t# get 16B shuffle" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20538 
// VectorLoadShuffle for short elements: zero-extend the 4 byte indices to
// halfword lanes so later Rearrange steps can work on H-sized indices.
instruct loadshuffle4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\n\t# 4B to 4H" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}
20550 
// VectorLoadShuffle for 8 short elements: zero-extend the 8 byte indices
// to halfword lanes.
instruct loadshuffle8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\n\t# 8B to 8H" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}
20562 
// VectorLoadShuffle for 2 int/float elements: zero-extend the byte indices
// twice (B->H, H->S) up to word lanes.
instruct loadshuffle2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 2B to 2H \n\t"
            "uxtl  $dst, $dst\t# 2H to 2S" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(pipe_class_default);
%}
20577 
// VectorLoadShuffle for 4 int/float elements: zero-extend the byte indices
// twice (B->H, H->S) up to word lanes.
instruct loadshuffle4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 4B to 4H \n\t"
            "uxtl  $dst, $dst\t# 4H to 4S" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(pipe_class_default);
%}
20592 
// VectorLoadShuffle for 2 long/double elements: zero-extend the byte
// indices three times (B->H, H->S, S->D) up to doubleword lanes.
// Fix: the last format comment said "2S to 4D" — the final uxtl widens
// 2 words to 2 doublewords.
instruct loadshuffle2L(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_LONG ||
            n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE));
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 2B to 2H \n\t"
            "uxtl  $dst, $dst\t# 2H to 2S \n\t"
            "uxtl  $dst, $dst\t# 2S to 2D" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ uxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg), __ T2S);
  %}
  ins_pipe(pipe_class_default);
%}
20609 
20610 //-------------------------------- Rearrange -------------------------------------
20611 // Here is an example that rearranges a NEON vector with 4 ints:
20612 // Rearrange V1 int[a0, a1, a2, a3] to V2 int[a2, a3, a0, a1]
20613 //   1. Get the indices of V1 and store them as Vi byte[0, 1, 2, 3].
20614 //   2. Convert Vi byte[0, 1, 2, 3] to the indices of V2 and also store them as Vi byte[2, 3, 0, 1].
20615 //   3. Unsigned extend Long Vi from byte[2, 3, 0, 1] to int[2, 3, 0, 1].
20616 //   4. Multiply Vi int[2, 3, 0, 1] with constant int[0x04040404, 0x04040404, 0x04040404, 0x04040404]
20617 //      and get tbl base Vm int[0x08080808, 0x0c0c0c0c, 0x00000000, 0x04040404].
20618 //   5. Add Vm with constant int[0x03020100, 0x03020100, 0x03020100, 0x03020100]
20619 //      and get tbl index Vm int[0x0b0a0908, 0x0f0e0d0c, 0x03020100, 0x07060504]
20620 //   6. Use Vm as index register, and use V1 as table register.
20621 //      Then get V2 as the result by tbl NEON instructions.
20622 // Notes:
20623 //   Step 1 matches VectorLoadConst.
20624 //   Step 3 matches VectorLoadShuffle.
20625 //   Step 4, 5, 6 match VectorRearrange.
20626 //   For VectorRearrange short/int, the reason why such complex calculation is
20627 //   required is because NEON tbl supports bytes table only, so for short/int, we
20628 //   need to lookup 2/4 bytes as a group. For VectorRearrange long, we use bsl
20629 //   to implement rearrange.
20630 
// VectorRearrange for 8 byte elements: single TBL lookup, with src as the
// table and shuffle as the byte indices.
// Fix: the format showed the table as {$dst}; the encoding uses $src as
// the table register.
instruct rearrange8B(vecD dst, vecD src, vecD shuffle) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "tbl $dst, {$src}, $shuffle\t# rearrange 8B" %}
  ins_encode %{
    __ tbl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($shuffle$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20644 
// VectorRearrange for 16 byte elements: single TBL lookup, with src as the
// table and shuffle as the byte indices.
// Fix: the format showed the table as {$dst}; the encoding uses $src as
// the table register.
instruct rearrange16B(vecX dst, vecX src, vecX shuffle) %{
  predicate(n->as_Vector()->length() == 16 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "tbl $dst, {$src}, $shuffle\t# rearrange 16B" %}
  ins_encode %{
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($shuffle$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20658 
// VectorRearrange for 4 shorts. Per the block comment above: scale the
// halfword indices by the element size (x2) and add the per-byte offsets
// 0x0100 to build byte-granular TBL indices, then do one TBL with src as
// the table.
instruct rearrange4S(vecD dst, vecD src, vecD shuffle, vecD vtmp0, vecD vtmp1) %{
  predicate(n->as_Vector()->length() == 4 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "mov   $vtmp0, CONSTANT\t# constant 0x0202020202020202 \n\t"
            "mov   $vtmp1, CONSTANT\t# constant 0x0100010001000100 \n\t"
            "mulv  $dst, T4H, $shuffle, $vtmp0\n\t"
            "addv  $dst, T8B, $dst, $vtmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 4S" %}
  ins_encode %{
    // vtmp0 = 0x02 in every byte (element-size multiplier),
    // vtmp1 = 0x0100 in every halfword (intra-element byte offsets).
    __ mov(as_FloatRegister($vtmp0$$reg), __ T8B, 0x02);
    __ mov(as_FloatRegister($vtmp1$$reg), __ T4H, 0x0100);
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($vtmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($dst$$reg), as_FloatRegister($vtmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20682 
// VectorRearrange for 8 shorts: same index-scaling scheme as rearrange4S
// (x2 multiplier plus 0x0100 offsets), on full 128-bit registers.
instruct rearrange8S(vecX dst, vecX src, vecX shuffle, vecX vtmp0, vecX vtmp1) %{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "mov   $vtmp0, CONSTANT\t# constant 0x0202020202020202 \n\t"
            "mov   $vtmp1, CONSTANT\t# constant 0x0100010001000100 \n\t"
            "mulv  $dst, T8H, $shuffle, $vtmp0\n\t"
            "addv  $dst, T16B, $dst, $vtmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 8S" %}
  ins_encode %{
    __ mov(as_FloatRegister($vtmp0$$reg), __ T16B, 0x02);
    __ mov(as_FloatRegister($vtmp1$$reg), __ T8H, 0x0100);
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($vtmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($dst$$reg), as_FloatRegister($vtmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20706 
// VectorRearrange for 2 int/float elements via BSL (see block comment
// above): negate the 0/1 indices into a 0/-1 selector mask, broadcast each
// of the two source lanes, then bit-select between the broadcasts.
// Fix: the format trailer said "rearrange 2L"; this pattern is 2I.
instruct rearrange2I(vecD dst, vecD src, vecD shuffle, vecD vtmp0, vecD vtmp1) %{
  predicate(n->as_Vector()->length() == 2 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "negr $dst, $shuffle\n\t"
            "dup  $vtmp0, T2S, $src, 0\n\t"
            "dup  $vtmp1, T2S, $src, 1\n\t"
            "bsl  $dst, $vtmp1, $vtmp0\t# rearrange 2I" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($shuffle$$reg));
    __ dup(as_FloatRegister($vtmp0$$reg), __ T2S, as_FloatRegister($src$$reg), 0);
    __ dup(as_FloatRegister($vtmp1$$reg), __ T2S, as_FloatRegister($src$$reg), 1);
    __ bsl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($vtmp1$$reg), as_FloatRegister($vtmp0$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20727 
// VectorRearrange for 4 int/float elements: scale word indices by the
// element size (x4) and add per-byte offsets 0x03020100 to form byte
// TBL indices, then one TBL with src as the table (see block comment
// above for the worked example).
instruct rearrange4I(vecX dst, vecX src, vecX shuffle, vecX vtmp0, vecX vtmp1) %{
  predicate(n->as_Vector()->length() == 4 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "mov   $vtmp0, CONSTANT\t# constant 0x0404040404040404 \n\t"
            "mov   $vtmp1, CONSTANT\t# constant 0x0302010003020100 \n\t"
            "mulv  $dst, T8H, $shuffle, $vtmp0\n\t"
            "addv  $dst, T16B, $dst, $vtmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 4I" %}
  ins_encode %{
    // vtmp0 = 0x04 in every byte (element-size multiplier),
    // vtmp1 = 0x03020100 in every word (intra-element byte offsets).
    __ mov(as_FloatRegister($vtmp0$$reg), __ T16B, 0x04);
    __ mov(as_FloatRegister($vtmp1$$reg), __ T4S, 0x03020100);
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($vtmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($dst$$reg), as_FloatRegister($vtmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20752 
// VectorRearrange for 2 long/double elements via BSL: negate the 0/1
// indices into a 0/-1 selector mask, broadcast lane 0 and lane 1 of src,
// then bit-select between the broadcasts.
instruct rearrange2L(vecX dst, vecX src, vecX shuffle, vecX vtmp0, vecX vtmp1) %{
  predicate(n->as_Vector()->length() == 2 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_LONG ||
            n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE));
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp0, TEMP vtmp1);
  format %{ "negr $dst, $shuffle\n\t"
            "dup  $vtmp0, T2D, $src, 0\n\t"
            "dup  $vtmp1, T2D, $src, 1\n\t"
            "bsl  $dst, $vtmp1, $vtmp0\t# rearrange 2L" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($shuffle$$reg));
    __ dup(as_FloatRegister($vtmp0$$reg), __ T2D, as_FloatRegister($src$$reg), 0);
    __ dup(as_FloatRegister($vtmp1$$reg), __ T2D, as_FloatRegister($src$$reg), 1);
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($vtmp1$$reg), as_FloatRegister($vtmp0$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20773 
// VectorTest anyTrue (BoolTest::ne) on an 8-byte mask. ADDV sums all byte
// lanes into lane 0; with 0/0xFF mask lanes, k set lanes give -k mod 256,
// which is non-zero iff k > 0. Result is 1 if any lane set, else 0.
instruct anytrue_in_mask8B(iRegINoSp dst, vecD src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::ne);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "addv  $tmp, T8B, $src1\n\t # src1 and src2 are the same"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst\t" %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src1$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::NE);
  %}
  ins_pipe(pipe_class_default);
%}
20791 
// VectorTest anyTrue (BoolTest::ne) on a 16-byte mask: same ADDV-and-test
// scheme as anytrue_in_mask8B.
instruct anytrue_in_mask16B(iRegINoSp dst, vecX src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::ne);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "addv  $tmp, T16B, $src1\n\t # src1 and src2 are the same"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst\t" %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src1$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_class_default);
%}
20809 
// VectorTest (BoolTest::overflow): set dst to 1 if ALL lanes of the 64-bit
// boolean mask are set, else 0. src2 is a maskAllTrue constant: and-ing with
// it then inverting yields all-zero bytes exactly when every src1 lane was
// true, so the addv accumulator is 0 iff all lanes are true (hence cset EQ).
// Fix: the inline comment in the format string previously had no trailing
// "\n\t", so the following "notr" mnemonic was glued onto the comment line
// in -XX:+PrintOptoAssembly output.
instruct alltrue_in_mask8B(iRegINoSp dst, vecD src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "andr  $tmp, T8B, $src1, $src2\t# src2 is maskAllTrue\n\t"
            "notr  $tmp, T8B, $tmp\n\t"
            "addv  $tmp, T8B, $tmp\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst, EQ\t# alltrue 8B" %}
  ins_encode %{
    __ andr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($tmp$$reg));
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_class_default);
%}
20832 
// VectorTest (BoolTest::overflow): set dst to 1 if ALL lanes of the 128-bit
// boolean mask are set, else 0. src2 is a maskAllTrue constant: and-ing with
// it then inverting yields all-zero bytes exactly when every src1 lane was
// true, so the addv accumulator is 0 iff all lanes are true (hence cset EQ).
// Fix: the inline comment in the format string previously had no trailing
// "\n\t", so the following "notr" mnemonic was glued onto the comment line
// in -XX:+PrintOptoAssembly output.
instruct alltrue_in_mask16B(iRegINoSp dst, vecX src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "andr  $tmp, T16B, $src1, $src2\t# src2 is maskAllTrue\n\t"
            "notr  $tmp, T16B, $tmp\n\t"
            "addv  $tmp, T16B, $tmp\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst, EQ\t# alltrue 16B" %}
  ins_encode %{
    __ andr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($tmp$$reg));
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_class_default);
%}
20855 
20856 // ------------------------------ Shift ---------------------------------------
// Materialize a shift-count vector for 4/8-byte vectors: broadcast the scalar
// count into every byte lane of a 64-bit vector register.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
20868 
// Materialize a shift-count vector for 16-byte vectors: broadcast the scalar
// count into every byte lane of a 128-bit vector register.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
20879 
// Vector left shift of 4/8 byte lanes by a per-lane register shift count (sshl).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
20893 
// Vector left shift of 16 byte lanes by a per-lane register shift count (sshl).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
20906 
20907 // Right shifts with vector shift count on aarch64 SIMD are implemented
20908 // as left shift by negative shift count.
20909 // There are two cases for vector shift count.
20910 //
20911 // Case 1: The vector shift count is from replication.
20912 //        |            |
20913 //    LoadVector  RShiftCntV
20914 //        |       /
20915 //     RShiftVI
20916 // Note: In inner loop, multiple neg instructions are used, which can be
20917 // moved to outer loop and merge into one neg instruction.
20918 //
20919 // Case 2: The vector shift count is from loading.
20920 // This case isn't supported by middle-end now. But it's supported by
20921 // panama/vectorIntrinsics(JEP 338: Vector API).
20922 //        |            |
20923 //    LoadVector  LoadVector
20924 //        |       /
20925 //     RShiftVI
20926 //
20927 
// Vector arithmetic right shift, 4/8 byte lanes, variable count.
// AArch64 SIMD has no right-shift-by-register, so this emits sshl with the
// negated shift count (see the case analysis in the comment block above).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
20945 
// Vector arithmetic right shift, 16 byte lanes, variable count.
// Implemented as sshl with the negated shift count (no right-shift-by-register
// on AArch64 SIMD).
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
20962 
// Vector logical (unsigned) right shift, 4/8 byte lanes, variable count.
// Implemented as ushl with the negated shift count.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
20980 
// Vector logical (unsigned) right shift, 16 byte lanes, variable count.
// Implemented as ushl with the negated shift count.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
20997 
// Vector left shift of 4/8 byte lanes by an immediate count.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Left shift by >= element width (8 bits) yields all-zero lanes;
      // eor of src with itself produces that zero result directly.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21017 
// Vector left shift of 16 byte lanes by an immediate count.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Left shift by >= element width (8 bits) yields all-zero lanes;
      // eor of src with itself produces that zero result directly.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21036 
// Vector arithmetic right shift of 4/8 byte lanes by an immediate count.
// Counts >= 8 are clamped to 7: an arithmetic shift fills with sign bits,
// so shifting by element-width-1 gives the same (sign-replicated) result.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
21051 
// Vector arithmetic right shift of 16 byte lanes by an immediate count.
// Counts >= 8 are clamped to 7 (sign-fill makes larger shifts equivalent).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
21065 
// Vector logical right shift of 4/8 byte lanes by an immediate count.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical right shift by >= element width yields zero; emit eor to
      // zero the destination instead.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21085 
// Vector logical right shift of 16 byte lanes by an immediate count.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Logical right shift by >= element width yields zero; emit eor to
      // zero the destination instead.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21104 
// Vector left shift of 2/4 short (16-bit) lanes by a per-lane register count.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
21118 
// Vector left shift of 8 short (16-bit) lanes by a per-lane register count.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
21131 
// Vector arithmetic right shift, 2/4 short lanes, variable count.
// Emitted as sshl with the negated count; negr works on the byte view (T8B)
// of the count vector, which the shift-count dup replicated per byte.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21149 
// Vector arithmetic right shift, 8 short lanes, variable count
// (sshl with negated count).
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21166 
// Vector logical right shift, 2/4 short lanes, variable count
// (ushl with negated count).
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21184 
// Vector logical right shift, 8 short lanes, variable count
// (ushl with negated count).
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21201 
// Vector left shift of 2/4 short lanes by an immediate count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Left shift by >= element width (16 bits) yields zero; eor zeroes dst.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21221 
// Vector left shift of 8 short lanes by an immediate count.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Left shift by >= element width (16 bits) yields zero; eor zeroes dst.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21240 
// Vector arithmetic right shift of 2/4 short lanes by an immediate count.
// Counts >= 16 are clamped to 15 (sign-fill makes larger shifts equivalent).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
21255 
// Vector arithmetic right shift of 8 short lanes by an immediate count.
// Counts >= 16 are clamped to 15 (sign-fill makes larger shifts equivalent).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
21269 
// Vector logical right shift of 2/4 short lanes by an immediate count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical right shift by >= element width yields zero; eor zeroes dst.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
21289 
// Vector logical right shift of 8 short lanes by an immediate count.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Logical right shift by >= element width yields zero; eor zeroes dst.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
21308 
// Vector left shift of 2 int (32-bit) lanes by a per-lane register count.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
21321 
// Vector left shift of 4 int (32-bit) lanes by a per-lane register count.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
21334 
// Vector arithmetic right shift, 2 int lanes, variable count
// (sshl with negated count).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21351 
// Vector arithmetic right shift, 4 int lanes, variable count
// (sshl with negated count).
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21368 
// Vector logical right shift, 2 int lanes, variable count
// (ushl with negated count).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
21385 
// Vector logical right shift, 4 int lanes, variable count
// (ushl with negated count).
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21402 
// Vector left shift of 2 int lanes by an immediate count.
// No clamp needed: Java masks int shift counts to 0..31, within the
// 32-bit element width — NOTE(review): assumed from the absence of the
// clamp the narrower types have; confirm against the middle-end.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
21415 
// Vector left shift of 4 int lanes by an immediate count.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21428 
// Vector arithmetic right shift of 2 int lanes by an immediate count.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
21441 
// Vector arithmetic right shift of 4 int lanes by an immediate count.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21454 
// Vector logical right shift of 2 int lanes by an immediate count.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
21467 
// Vector logical right shift of 4 int lanes by an immediate count.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21480 
// Vector left shift of 2 long (64-bit) lanes by a per-lane register count.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
21493 
// Vector arithmetic right shift, 2 long lanes, variable count
// (sshl with negated count).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21510 
// Vector logical right shift, 2 long lanes, variable count
// (ushl with negated count).
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
21527 
// Vector left shift of 2 long lanes by an immediate count.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21540 
// Vector arithmetic right shift of 2 long lanes by an immediate count.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21553 
// Vector logical right shift of 2 long lanes by an immediate count.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
21566 
21567 //----------PEEPHOLE RULES-----------------------------------------------------
21568 // These must follow all instruction definitions as they use the names
21569 // defined in the instructions definitions.
21570 //
21571 // peepmatch ( root_instr_name [preceding_instruction]* );
21572 //
21573 // peepconstraint %{
21574 // (instruction_number.operand_name relational_op instruction_number.operand_name
21575 //  [, ...] );
21576 // // instruction numbers are zero-based using left to right order in peepmatch
21577 //
21578 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
21579 // // provide an instruction_number.operand_name for each operand that appears
21580 // // in the replacement instruction's match rule
21581 //
21582 // ---------VM FLAGS---------------------------------------------------------
21583 //
21584 // All peephole optimizations can be turned off using -XX:-OptoPeephole
21585 //
21586 // Each peephole rule is given an identifying number starting with zero and
21587 // increasing by one in the order seen by the parser.  An individual peephole
21588 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
21589 // on the command-line.
21590 //
21591 // ---------CURRENT LIMITATIONS----------------------------------------------
21592 //
21593 // Only match adjacent instructions in same basic block
21594 // Only equality constraints
21595 // Only constraints between operands, not (0.dest_reg == RAX_enc)
21596 // Only one replacement instruction
21597 //
21598 // ---------EXAMPLE----------------------------------------------------------
21599 //
21600 // // pertinent parts of existing instructions in architecture description
21601 // instruct movI(iRegINoSp dst, iRegI src)
21602 // %{
21603 //   match(Set dst (CopyI src));
21604 // %}
21605 //
21606 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
21607 // %{
21608 //   match(Set dst (AddI dst src));
21609 //   effect(KILL cr);
21610 // %}
21611 //
21612 // // Change (inc mov) to lea
21613 // peephole %{
21614 //   // increment preceded by register-register move
21615 //   peepmatch ( incI_iReg movI );
21616 //   // require that the destination register of the increment
21617 //   // match the destination register of the move
21618 //   peepconstraint ( 0.dst == 1.dst );
21619 //   // construct a replacement instruction that sets
21620 //   // the destination to ( move's source register + one )
21621 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
21622 // %}
21623 //
21624 
21625 // Implementation no longer uses movX instructions since
21626 // machine-independent system no longer uses CopyX nodes.
21627 //
21628 // peephole
21629 // %{
21630 //   peepmatch (incI_iReg movI);
21631 //   peepconstraint (0.dst == 1.dst);
21632 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21633 // %}
21634 
21635 // peephole
21636 // %{
21637 //   peepmatch (decI_iReg movI);
21638 //   peepconstraint (0.dst == 1.dst);
21639 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21640 // %}
21641 
21642 // peephole
21643 // %{
21644 //   peepmatch (addI_iReg_imm movI);
21645 //   peepconstraint (0.dst == 1.dst);
21646 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21647 // %}
21648 
21649 // peephole
21650 // %{
21651 //   peepmatch (incL_iReg movL);
21652 //   peepconstraint (0.dst == 1.dst);
21653 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21654 // %}
21655 
21656 // peephole
21657 // %{
21658 //   peepmatch (decL_iReg movL);
21659 //   peepconstraint (0.dst == 1.dst);
21660 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21661 // %}
21662 
21663 // peephole
21664 // %{
21665 //   peepmatch (addL_iReg_imm movL);
21666 //   peepconstraint (0.dst == 1.dst);
21667 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21668 // %}
21669 
21670 // peephole
21671 // %{
21672 //   peepmatch (addP_iReg_imm movP);
21673 //   peepconstraint (0.dst == 1.dst);
21674 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
21675 // %}
21676 
21677 // // Change load of spilled value to only a spill
21678 // instruct storeI(memory mem, iRegI src)
21679 // %{
21680 //   match(Set mem (StoreI mem src));
21681 // %}
21682 //
21683 // instruct loadI(iRegINoSp dst, memory mem)
21684 // %{
21685 //   match(Set dst (LoadI mem));
21686 // %}
21687 //
21688 
21689 //----------SMARTSPILL RULES---------------------------------------------------
21690 // These must follow all instruction definitions as they use the names
21691 // defined in the instruction definitions.
21692 
21693 // Local Variables:
21694 // mode: c++
21695 // End: