1 //
   2 // Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
  32 
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// follow the C1 compiler in making registers
//
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r32 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
//

// General Registers

// First column is the Java (register allocator) save type; second is
// the C calling convention save type.  All allocatable registers are
// SOC for Java (see note above), while the C convention column marks
// r19-r28 as callee save (SOE).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers

// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// AArch64 has 32 floating-point registers. Each can store a vector of
// single or double precision floating-point values up to 8 * 32
// floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
// use the first float or double element of the vector.

// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save).  Float
// registers v16-v31 are SOC as per the platform spec.

// Each 128-bit SIMD register is described to the allocator as four
// 32-bit slots: V<n> (the low word), then V<n>_H, V<n>_J and V<n>_K
// for the successive higher words (as_VMReg()->next(1..3)).

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
// ----------------------------
// Special Registers
// ----------------------------

// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).

reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation priority for general registers (highest first):
// volatiles, then arg registers, then non-volatiles, and finally the
// non-allocatable system registers.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation priority for FP/SIMD registers (highest first): the
// no-save registers v16-v31, then arg registers v0-v7, then the
// ABI callee-saved v8-v15 (see the comment above the V reg_defs).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit general purpose registers
// (r8/r9 are deliberately absent: they are kept invisible to the
// allocator for use as scratch registers — see the note above)
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
 471 
 472 
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
// Defined dynamically: the mask is supplied by C++ code.
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}
 478 
// Singleton classes pin an operand to one specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
 493 
// Class for all 64 bit general purpose registers
// (each 64-bit register is the low half plus its virtual _H upper half)
reg_class all_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 527 
// Class for all long integer registers (including SP)
// Defined dynamically: the mask is supplied by C++ code.
reg_class any_reg %{
  return _ANY_REG_mask;
%}
 532 
// Class for non-allocatable 32 bit registers
// NOTE(review): r27 (heapbase) and r29 (fp) are not listed here even
// though chunk0 marks them non-allocatable — presumably they are
// handled conditionally elsewhere; confirm against the C++ masks.
reg_class non_allocatable_reg32(
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);
 539 
// Class for non-allocatable 64 bit registers
// (64-bit counterpart of non_allocatable_reg32 above)
reg_class non_allocatable_reg(
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
 546 
// Class for all non-special integer registers
// Defined dynamically: the mask is supplied by C++ code.
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
// Defined dynamically: the mask is supplied by C++ code.
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}
 556 
// Singleton classes for individual 64-bit general purpose registers,
// followed by the special-purpose classes (method, heapbase, thread,
// fp, lr, sp) and the dynamically-defined pointer classes.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
// Defined dynamically: the mask is supplied by C++ code.
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
// Defined dynamically: the mask is supplied by C++ code.
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}
 636 
// Class for all float registers (single 32-bit slot of each v register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 672 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 710 
// Class for all 64bit vector registers (low two 32-bit slots)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 746 
// Class for all 128bit vector registers (all four 32-bit slots)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 782 
// Singleton classes for individual v registers.
// NOTE(review): each class lists only the low two 32-bit slots
// (V<n>, V<n>_H) although the comments say "128 bit register" —
// presumably the _J/_K slots are not needed for these operands;
// confirm against the operand definitions that use them.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 942 
 943 // Singleton class for condition codes
 944 reg_class int_flags(RFLAGS);
 945 
 946 %}
 947 
 948 //----------DEFINITION BLOCK---------------------------------------------------
 949 // Define name --> value mappings to inform the ADLC of an integer valued name
 950 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 951 // Format:
 952 //        int_def  <name>         ( <int_value>, <expression>);
 953 // Generated Code in ad_<arch>.hpp
 954 //        #define  <name>   (<expression>)
 955 //        // value == <int_value>
 956 // Generated code in ad_<arch>.cpp adlc_verification()
 957 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 958 //
 959 
 960 // we follow the ppc-aix port in using a simple cost model which ranks
 961 // register operations as cheap, memory ops as more expensive and
 962 // branches as most expensive. the first two have a low as well as a
 963 // normal cost. huge cost appears to be a way of saying don't do
 964 // something
 965 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches are ranked twice as expensive as a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are ranked the same as branches in this simple model.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references (which imply barriers) are an order of
  // magnitude more expensive than a register op.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 973 
 974 
 975 //----------SOURCE BLOCK-------------------------------------------------------
 976 // This is a block of C++ code which provides values, functions, and
 977 // definitions necessary in the rest of the architecture description
 978 
 979 source_hpp %{
 980 
 981 #include "asm/macroAssembler.hpp"
 982 #include "gc/shared/cardTable.hpp"
 983 #include "gc/shared/cardTableBarrierSet.hpp"
 984 #include "gc/shared/collectedHeap.hpp"
 985 #include "opto/addnode.hpp"
 986 #include "opto/convertnode.hpp"
 987 
 988 extern RegMask _ANY_REG32_mask;
 989 extern RegMask _ANY_REG_mask;
 990 extern RegMask _PTR_REG_mask;
 991 extern RegMask _NO_SPECIAL_REG32_mask;
 992 extern RegMask _NO_SPECIAL_REG_mask;
 993 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 994 
// Platform hooks describing call trampoline stubs; AArch64 uses none.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1012 
// Platform hooks for emitting and sizing the exception and deopt
// handlers appended to compiled-method code.
class HandlerImpl {

 public:

  // Emit the handler code into cbuf; implemented later in this file.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // The exception handler is a single far branch.
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1029 
// Platform-dependent node flags; AArch64 defines no flags beyond the
// shared ones, so _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
1036 
// Predicates used by instruction-rule predicates below to choose
// between ldar/stlr forms and plain loads/stores plus dmb barriers.

// returns true if opcode is one of the CompareAndSwapX flavours
// (implementation appears in the source block)
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1055 %}
1056 
1057 source %{
1058 
1059   // Derived RegMask with conditionally allocatable registers
1060 
  void PhaseOutput::pd_perform_mach_node_analysis() {
    // No AArch64-specific analysis of the machine-node graph is needed.
  }
1063 
  int MachNode::pd_alignment_required() const {
    // No machine node requires more than unit alignment on AArch64.
    return 1;
  }
1067 
  int MachNode::compute_padding(int current_offset) const {
    // No padding is ever inserted ahead of a machine node on AArch64.
    return 0;
  }
1071 
  // Definitions of the RegMask objects declared extern in the
  // source_hpp block above; populated by reg_mask_init() below.
  RegMask _ANY_REG32_mask;
  RegMask _ANY_REG_mask;
  RegMask _PTR_REG_mask;
  RegMask _NO_SPECIAL_REG32_mask;
  RegMask _NO_SPECIAL_REG_mask;
  RegMask _NO_SPECIAL_PTR_REG_mask;
1078 
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    // Any 32-bit register except the stack pointer (r31).
    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    // Start from all registers and strip those adlc marked non-allocatable.
    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && CompressedOops::ptrs_base() != NULL) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
      _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
    }
  }
1115 
  // Optimization of volatile gets and puts
1117   // -------------------------------------
1118   //
1119   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1120   // use to implement volatile reads and writes. For a volatile read
1121   // we simply need
1122   //
1123   //   ldar<x>
1124   //
1125   // and for a volatile write we need
1126   //
1127   //   stlr<x>
1128   //
1129   // Alternatively, we can implement them by pairing a normal
1130   // load/store with a memory barrier. For a volatile read we need
1131   //
1132   //   ldr<x>
1133   //   dmb ishld
1134   //
1135   // for a volatile write
1136   //
1137   //   dmb ish
1138   //   str<x>
1139   //   dmb ish
1140   //
1141   // We can also use ldaxr and stlxr to implement compare and swap CAS
1142   // sequences. These are normally translated to an instruction
1143   // sequence like the following
1144   //
1145   //   dmb      ish
1146   // retry:
1147   //   ldxr<x>   rval raddr
1148   //   cmp       rval rold
1149   //   b.ne done
1150   //   stlxr<x>  rval, rnew, rold
1151   //   cbnz      rval retry
1152   // done:
1153   //   cset      r0, eq
1154   //   dmb ishld
1155   //
1156   // Note that the exclusive store is already using an stlxr
1157   // instruction. That is required to ensure visibility to other
1158   // threads of the exclusive write (assuming it succeeds) before that
1159   // of any subsequent writes.
1160   //
1161   // The following instruction sequence is an improvement on the above
1162   //
1163   // retry:
1164   //   ldaxr<x>  rval raddr
1165   //   cmp       rval rold
1166   //   b.ne done
1167   //   stlxr<x>  rval, rnew, rold
1168   //   cbnz      rval retry
1169   // done:
1170   //   cset      r0, eq
1171   //
1172   // We don't need the leading dmb ish since the stlxr guarantees
1173   // visibility of prior writes in the case that the swap is
1174   // successful. Crucially we don't have to worry about the case where
1175   // the swap is not successful since no valid program should be
1176   // relying on visibility of prior changes by the attempting thread
1177   // in the case where the CAS fails.
1178   //
1179   // Similarly, we don't need the trailing dmb ishld if we substitute
1180   // an ldaxr instruction since that will provide all the guarantees we
1181   // require regarding observation of changes made by other threads
1182   // before any change to the CAS address observed by the load.
1183   //
1184   // In order to generate the desired instruction sequence we need to
1185   // be able to identify specific 'signature' ideal graph node
1186   // sequences which i) occur as a translation of a volatile reads or
1187   // writes or CAS operations and ii) do not occur through any other
1188   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1190   // sequences to the desired machine code sequences. Selection of the
1191   // alternative rules can be implemented by predicates which identify
1192   // the relevant node sequences.
1193   //
1194   // The ideal graph generator translates a volatile read to the node
1195   // sequence
1196   //
1197   //   LoadX[mo_acquire]
1198   //   MemBarAcquire
1199   //
1200   // As a special case when using the compressed oops optimization we
1201   // may also see this variant
1202   //
1203   //   LoadN[mo_acquire]
1204   //   DecodeN
1205   //   MemBarAcquire
1206   //
1207   // A volatile write is translated to the node sequence
1208   //
1209   //   MemBarRelease
1210   //   StoreX[mo_release] {CardMark}-optional
1211   //   MemBarVolatile
1212   //
1213   // n.b. the above node patterns are generated with a strict
1214   // 'signature' configuration of input and output dependencies (see
1215   // the predicates below for exact details). The card mark may be as
1216   // simple as a few extra nodes or, in a few GC configurations, may
1217   // include more complex control flow between the leading and
1218   // trailing memory barriers. However, whatever the card mark
1219   // configuration these signatures are unique to translated volatile
1220   // reads/stores -- they will not appear as a result of any other
1221   // bytecode translation or inlining nor as a consequence of
1222   // optimizing transforms.
1223   //
1224   // We also want to catch inlined unsafe volatile gets and puts and
1225   // be able to implement them using either ldar<x>/stlr<x> or some
1226   // combination of ldr<x>/stlr<x> and dmb instructions.
1227   //
1228   // Inlined unsafe volatiles puts manifest as a minor variant of the
1229   // normal volatile put node sequence containing an extra cpuorder
1230   // membar
1231   //
1232   //   MemBarRelease
1233   //   MemBarCPUOrder
1234   //   StoreX[mo_release] {CardMark}-optional
1235   //   MemBarCPUOrder
1236   //   MemBarVolatile
1237   //
1238   // n.b. as an aside, a cpuorder membar is not itself subject to
1239   // matching and translation by adlc rules.  However, the rule
1240   // predicates need to detect its presence in order to correctly
1241   // select the desired adlc rules.
1242   //
1243   // Inlined unsafe volatile gets manifest as a slightly different
1244   // node sequence to a normal volatile get because of the
1245   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1248   // present
1249   //
1250   //   MemBarCPUOrder
1251   //        ||       \\
1252   //   MemBarCPUOrder LoadX[mo_acquire]
1253   //        ||            |
1254   //        ||       {DecodeN} optional
1255   //        ||       /
1256   //     MemBarAcquire
1257   //
1258   // In this case the acquire membar does not directly depend on the
1259   // load. However, we can be sure that the load is generated from an
1260   // inlined unsafe volatile get if we see it dependent on this unique
1261   // sequence of membar nodes. Similarly, given an acquire membar we
1262   // can know that it was added because of an inlined unsafe volatile
1263   // get if it is fed and feeds a cpuorder membar and if its feed
1264   // membar also feeds an acquiring load.
1265   //
1266   // Finally an inlined (Unsafe) CAS operation is translated to the
1267   // following ideal graph
1268   //
1269   //   MemBarRelease
1270   //   MemBarCPUOrder
1271   //   CompareAndSwapX {CardMark}-optional
1272   //   MemBarCPUOrder
1273   //   MemBarAcquire
1274   //
1275   // So, where we can identify these volatile read and write
1276   // signatures we can choose to plant either of the above two code
1277   // sequences. For a volatile read we can simply plant a normal
1278   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1279   // also choose to inhibit translation of the MemBarAcquire and
1280   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1281   //
1282   // When we recognise a volatile store signature we can choose to
1283   // plant at a dmb ish as a translation for the MemBarRelease, a
1284   // normal str<x> and then a dmb ish for the MemBarVolatile.
1285   // Alternatively, we can inhibit translation of the MemBarRelease
1286   // and MemBarVolatile and instead plant a simple stlr<x>
1287   // instruction.
1288   //
1289   // when we recognise a CAS signature we can choose to plant a dmb
1290   // ish as a translation for the MemBarRelease, the conventional
1291   // macro-instruction sequence for the CompareAndSwap node (which
1292   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1293   // Alternatively, we can elide generation of the dmb instructions
1294   // and plant the alternative CompareAndSwap macro-instruction
1295   // sequence (which uses ldaxr<x>).
1296   //
1297   // Of course, the above only applies when we see these signature
1298   // configurations. We still want to plant dmb instructions in any
1299   // other cases where we may see a MemBarAcquire, MemBarRelease or
1300   // MemBarVolatile. For example, at the end of a constructor which
1301   // writes final/volatile fields we will see a MemBarRelease
1302   // instruction and this needs a 'dmb ish' lest we risk the
1303   // constructed object being visible without making the
1304   // final/volatile field writes visible.
1305   //
1306   // n.b. the translation rules below which rely on detection of the
1307   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1308   // If we see anything other than the signature configurations we
1309   // always just translate the loads and stores to ldr<x> and str<x>
1310   // and translate acquire, release and volatile membars to the
1311   // relevant dmb instructions.
1312   //
1313 
1314   // is_CAS(int opcode, bool maybe_volatile)
1315   //
1316   // return true if opcode is one of the possible CompareAndSwapX
1317   // values otherwise false.
1318 
1319   bool is_CAS(int opcode, bool maybe_volatile)
1320   {
1321     switch(opcode) {
1322       // We handle these
1323     case Op_CompareAndSwapI:
1324     case Op_CompareAndSwapL:
1325     case Op_CompareAndSwapP:
1326     case Op_CompareAndSwapN:
1327     case Op_ShenandoahCompareAndSwapP:
1328     case Op_ShenandoahCompareAndSwapN:
1329     case Op_CompareAndSwapB:
1330     case Op_CompareAndSwapS:
1331     case Op_GetAndSetI:
1332     case Op_GetAndSetL:
1333     case Op_GetAndSetP:
1334     case Op_GetAndSetN:
1335     case Op_GetAndAddI:
1336     case Op_GetAndAddL:
1337       return true;
1338     case Op_CompareAndExchangeI:
1339     case Op_CompareAndExchangeN:
1340     case Op_CompareAndExchangeB:
1341     case Op_CompareAndExchangeS:
1342     case Op_CompareAndExchangeL:
1343     case Op_CompareAndExchangeP:
1344     case Op_WeakCompareAndSwapB:
1345     case Op_WeakCompareAndSwapS:
1346     case Op_WeakCompareAndSwapI:
1347     case Op_WeakCompareAndSwapL:
1348     case Op_WeakCompareAndSwapP:
1349     case Op_WeakCompareAndSwapN:
1350     case Op_ShenandoahWeakCompareAndSwapP:
1351     case Op_ShenandoahWeakCompareAndSwapN:
1352     case Op_ShenandoahCompareAndExchangeP:
1353     case Op_ShenandoahCompareAndExchangeN:
1354       return maybe_volatile;
1355     default:
1356       return false;
1357     }
1358   }
1359 
1360   // helper to determine the maximum number of Phi nodes we may need to
1361   // traverse when searching from a card mark membar for the merge mem
1362   // feeding a trailing membar or vice versa
1363 
1364 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1365 
// Returns true when the MemBarAcquire can be elided because the
// associated load (or load-store) will be emitted in an acquiring form
// (ldar<x> / ldaxr<x>) instead of a plain load plus dmb.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing membar of a volatile load: the load is emitted as ldar<x>.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing membar of a load-store: elide only when the preceding node
  // is one of the (possibly volatile) CompareAndSwap flavours.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1389 
1390 bool needs_acquiring_load(const Node *n)
1391 {
1392   assert(n->is_Load(), "expecting a load");
1393   if (UseBarriersForVolatile) {
1394     // we use a normal load and a dmb
1395     return false;
1396   }
1397 
1398   LoadNode *ld = n->as_Load();
1399 
1400   return ld->is_acquire();
1401 }
1402 
1403 bool unnecessary_release(const Node *n)
1404 {
1405   assert((n->is_MemBar() &&
1406           n->Opcode() == Op_MemBarRelease),
1407          "expecting a release membar");
1408 
1409   if (UseBarriersForVolatile) {
1410     // we need to plant a dmb
1411     return false;
1412   }
1413 
1414   MemBarNode *barrier = n->as_MemBar();
1415   if (!barrier->leading()) {
1416     return false;
1417   } else {
1418     Node* trailing = barrier->trailing_membar();
1419     MemBarNode* trailing_mb = trailing->as_MemBar();
1420     assert(trailing_mb->trailing(), "Not a trailing membar?");
1421     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1422 
1423     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1424     if (mem->is_Store()) {
1425       assert(mem->as_Store()->is_release(), "");
1426       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1427       return true;
1428     } else {
1429       assert(mem->is_LoadStore(), "");
1430       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1431       return is_CAS(mem->Opcode(), true);
1432     }
1433   }
1434   return false;
1435 }
1436 
// Returns true when a trailing MemBarVolatile can be elided because the
// store it trails will be emitted as stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // Only the trailing membar of a releasing store is redundant.
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // Sanity-check the expected leading/trailing membar pairing.
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1460 
1461 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1462 
1463 bool needs_releasing_store(const Node *n)
1464 {
1465   // assert n->is_Store();
1466   if (UseBarriersForVolatile) {
1467     // we use a normal store and dmb combination
1468     return false;
1469   }
1470 
1471   StoreNode *st = n->as_Store();
1472 
1473   return st->trailing_membar() != NULL;
1474 }
1475 
1476 // predicate controlling translation of CAS
1477 //
1478 // returns true if CAS needs to use an acquiring load otherwise false
1479 
1480 bool needs_acquiring_load_exclusive(const Node *n)
1481 {
1482   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1483   if (UseBarriersForVolatile) {
1484     return false;
1485   }
1486 
1487   LoadStoreNode* ldst = n->as_LoadStore();
1488   if (is_CAS(n->Opcode(), false)) {
1489     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1490   } else {
1491     return ldst->trailing_membar() != NULL;
1492   }
1493 
1494   // so we can just return true here
1495   return true;
1496 }
1497 
1498 #define __ _masm.
1499 
1500 // advance declarations for helper functions to convert register
1501 // indices to register objects
1502 
1503 // the ad file has to provide implementations of certain methods
1504 // expected by the generic code
1505 //
1506 // REQUIRED FUNCTIONALITY
1507 
1508 //=============================================================================
1509 
1510 // !!!!! Special hack to get all types of calls to specify the byte offset
1511 //       from the start of the call to the point where the return address
1512 //       will point.
1513 
1514 int MachCallStaticJavaNode::ret_addr_offset()
1515 {
1516   // call should be a simple bl
1517   int off = 4;
1518   return off;
1519 }
1520 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // Four 4-byte instructions precede the return address.
  return 16; // movz, movk, movk, bl
}
1525 
1526 int MachCallRuntimeNode::ret_addr_offset() {
1527   // for generated stubs the call will be
1528   //   far_call(addr)
1529   // for real runtime callouts it will be six instructions
1530   // see aarch64_enc_java_to_runtime
1531   //   adr(rscratch2, retaddr)
1532   //   lea(rscratch1, RuntimeAddress(addr)
1533   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1534   //   blr(rscratch1)
1535   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1536   if (cb) {
1537     return MacroAssembler::far_branch_size();
1538   } else {
1539     return 6 * NativeInstruction::instruction_size;
1540   }
1541 }
1542 
1543 // Indicate if the safepoint node needs the polling page as an input
1544 
1545 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1547 // instruction itself. so we cannot plant a mov of the safepoint poll
1548 // address followed by a load. setting this to true means the mov is
1549 // scheduled as a prior instruction. that's better for scheduling
1550 // anyway.
1551 
bool SafePointNode::needs_polling_address_input()
{
  // See the comment above: the poll address must be materialized by a
  // separately scheduled instruction, so it is passed as an input.
  return true;
}
1556 
1557 //=============================================================================
1558 
1559 #ifndef PRODUCT
// Pretty-printer for the breakpoint node (debug builds only).
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
1563 #endif
1564 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // A breakpoint is a single brk instruction with immediate 0.
  C2_MacroAssembler _masm(&cbuf);
  __ brk(0);
}
1569 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // Defer to the generic size computation.
  return MachNode::size(ra_);
}
1573 
1574 //=============================================================================
1575 
1576 #ifndef PRODUCT
  // Pretty-printer for the nop-padding node (debug builds only).
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1580 #endif
1581 
1582   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1583     C2_MacroAssembler _masm(&cbuf);
1584     for (int i = 0; i < _count; i++) {
1585       __ nop();
1586     }
1587   }
1588 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // One fixed-size instruction per requested nop.
    return _count * NativeInstruction::instruction_size;
  }
1592 
1593 //=============================================================================
1594 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1595 
int ConstantTable::calculate_table_base_offset() const {
  // The constant table base needs no bias on this platform.
  return 0;  // absolute addressing, no offset
}
1599 
// No post-register-allocation expansion of the constant base is needed.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // Never called, since requires_postalloc_expand() returns false.
  ShouldNotReachHere();
}
1604 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
1608 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // Matches the empty encoding in emit() above.
  return 0;
}
1612 
1613 #ifndef PRODUCT
// Pretty-printer for the (empty) constant base node (debug builds only).
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1617 #endif
1618 
1619 #ifndef PRODUCT
// Pretty-printer for the method prolog (debug builds only); mirrors the
// code paths taken by MachPrologNode::emit below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // Small frames use an immediate sp adjustment; large frames must go
  // through rscratch1 because the immediate field is limited.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // Describe the nmethod entry barrier when the GC uses one.
  if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
1653 #endif
1654 
// Emit the method prolog: optional class-init barrier, stack bang,
// frame construction, and optional nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->output()->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    // Jump to the wrong-method stub unless the holder class is
    // initialized (or being initialized by this thread).
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // Touch pages beyond the frame to detect stack overflow eagerly.
  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == NULL) {
    // Compiled Java methods get the GC's nmethod entry barrier.
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(&_masm);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1702 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1708 
int MachPrologNode::reloc() const
{
  // No relocation entries are required for the prolog.
  return 0;
}
1713 
1714 //=============================================================================
1715 
1716 #ifndef PRODUCT
// Pretty-printer for the method epilog (debug builds only); mirrors the
// code paths taken by MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // Frame teardown mirrors the three size cases used on frame build.
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
    st->print("ldr zr, [rscratch1]");
  }
}
1740 #endif
1741 
// Emit the method epilog: frame teardown, optional reserved-stack
// check, and the return-poll of the safepoint polling page.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  C2_MacroAssembler _masm(&cbuf);
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
  }
}
1757 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
1762 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  // The poll emitted in emit() above is the single relocated reference.
  return 1; // 1 for polling page.
}
1767 
const Pipeline * MachEpilogNode::pipeline() const {
  // Use the default pipeline description.
  return MachNode::pipeline_class();
}
1771 
1772 //=============================================================================
1773 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack. rc_bad marks an unassigned register (OptoReg::Bad).
enum RC { rc_bad, rc_int, rc_float, rc_stack };
1777 
1778 static enum RC rc_class(OptoReg::Name reg) {
1779 
1780   if (reg == OptoReg::Bad) {
1781     return rc_bad;
1782   }
1783 
1784   // we have 30 int registers * 2 halves
1785   // (rscratch1 and rscratch2 are omitted)
1786   int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
1787 
1788   if (reg < slots_of_int_registers) {
1789     return rc_int;
1790   }
1791 
1792   // we have 32 float register * 4 halves
1793   if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
1794     return rc_float;
1795   }
1796 
1797   // Between float regs & stack is the flags regs.
1798   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1799 
1800   return rc_stack;
1801 }
1802 
1803 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1804   Compile* C = ra_->C;
1805 
1806   // Get registers to move.
1807   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1808   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1809   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1810   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1811 
1812   enum RC src_hi_rc = rc_class(src_hi);
1813   enum RC src_lo_rc = rc_class(src_lo);
1814   enum RC dst_hi_rc = rc_class(dst_hi);
1815   enum RC dst_lo_rc = rc_class(dst_lo);
1816 
1817   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1818 
1819   if (src_hi != OptoReg::Bad) {
1820     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1821            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1822            "expected aligned-adjacent pairs");
1823   }
1824 
1825   if (src_lo == dst_lo && src_hi == dst_hi) {
1826     return 0;            // Self copy, no move.
1827   }
1828 
1829   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1830               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1831   int src_offset = ra_->reg2offset(src_lo);
1832   int dst_offset = ra_->reg2offset(dst_lo);
1833 
1834   if (bottom_type()->isa_vect() != NULL) {
1835     uint ireg = ideal_reg();
1836     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1837     if (cbuf) {
1838       C2_MacroAssembler _masm(cbuf);
1839       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1840       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1841         // stack->stack
1842         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1843         if (ireg == Op_VecD) {
1844           __ unspill(rscratch1, true, src_offset);
1845           __ spill(rscratch1, true, dst_offset);
1846         } else {
1847           __ spill_copy128(src_offset, dst_offset);
1848         }
1849       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1850         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1851                ireg == Op_VecD ? __ T8B : __ T16B,
1852                as_FloatRegister(Matcher::_regEncode[src_lo]));
1853       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1854         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1855                        ireg == Op_VecD ? __ D : __ Q,
1856                        ra_->reg2offset(dst_lo));
1857       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1858         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1859                        ireg == Op_VecD ? __ D : __ Q,
1860                        ra_->reg2offset(src_lo));
1861       } else {
1862         ShouldNotReachHere();
1863       }
1864     }
1865   } else if (cbuf) {
1866     C2_MacroAssembler _masm(cbuf);
1867     switch (src_lo_rc) {
1868     case rc_int:
1869       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1870         if (is64) {
1871             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1872                    as_Register(Matcher::_regEncode[src_lo]));
1873         } else {
1874             C2_MacroAssembler _masm(cbuf);
1875             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1876                     as_Register(Matcher::_regEncode[src_lo]));
1877         }
1878       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1879         if (is64) {
1880             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1881                      as_Register(Matcher::_regEncode[src_lo]));
1882         } else {
1883             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1884                      as_Register(Matcher::_regEncode[src_lo]));
1885         }
1886       } else {                    // gpr --> stack spill
1887         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1888         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1889       }
1890       break;
1891     case rc_float:
1892       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1893         if (is64) {
1894             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1895                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1896         } else {
1897             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1898                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1899         }
1900       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1901           if (cbuf) {
1902             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1903                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1904         } else {
1905             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1906                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1907         }
1908       } else {                    // fpr --> stack spill
1909         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1910         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1911                  is64 ? __ D : __ S, dst_offset);
1912       }
1913       break;
1914     case rc_stack:
1915       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1916         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1917       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1918         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1919                    is64 ? __ D : __ S, src_offset);
1920       } else {                    // stack --> stack copy
1921         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1922         __ unspill(rscratch1, is64, src_offset);
1923         __ spill(rscratch1, is64, dst_offset);
1924       }
1925       break;
1926     default:
1927       assert(false, "bad rc_class for spill");
1928       ShouldNotReachHere();
1929     }
1930   }
1931 
1932   if (st) {
1933     st->print("spill ");
1934     if (src_lo_rc == rc_stack) {
1935       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1936     } else {
1937       st->print("%s -> ", Matcher::regName[src_lo]);
1938     }
1939     if (dst_lo_rc == rc_stack) {
1940       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1941     } else {
1942       st->print("%s", Matcher::regName[dst_lo]);
1943     }
1944     if (bottom_type()->isa_vect() != NULL) {
1945       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1946     } else {
1947       st->print("\t# spill size = %d", is64 ? 64:32);
1948     }
1949   }
1950 
1951   return 0;
1952 
1953 }
1954 
1955 #ifndef PRODUCT
1956 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1957   if (!ra_)
1958     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1959   else
1960     implementation(NULL, ra_, false, st);
1961 }
1962 #endif
1963 
// Emit the spill copy into the code buffer (no printing).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1967 
// Spill copies have no fixed length; measure dynamically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1971 
1972 //=============================================================================
1973 
1974 #ifndef PRODUCT
1975 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1976   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1977   int reg = ra_->get_reg_first(this);
1978   st->print("add %s, rsp, #%d]\t# box lock",
1979             Matcher::regName[reg], offset);
1980 }
1981 #endif
1982 
1983 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1984   C2_MacroAssembler _masm(&cbuf);
1985 
1986   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1987   int reg    = ra_->get_encode(this);
1988 
1989   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
1990     __ add(as_Register(reg), sp, offset);
1991   } else {
1992     ShouldNotReachHere();
1993   }
1994 }
1995 
// Fixed size: emit() produces a single 4-byte add instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
2000 
2001 //=============================================================================
2002 
2003 #ifndef PRODUCT
// Debug-print the unverified entry point (inline cache check) pseudo-code.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
2018 #endif
2019 
// Emit the unverified entry point: compare the receiver's klass against
// the inline-cache expectation and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  C2_MacroAssembler _masm(&cbuf);

  // NOTE(review): cmp_klass compares the klass of the receiver (j_rarg0)
  // with rscratch2, using rscratch1 as a temp -- confirm against
  // MacroAssembler::cmp_klass.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2033 
// Variable size (far_jump length may vary); measure dynamically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
2038 
2039 // REQUIRED EMIT CODE
2040 
2041 //=============================================================================
2042 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache full).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2062 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache full).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  C2_MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // NOTE(review): lr is set to the current pc so the deopt blob sees a
  // return address pointing at this handler -- confirm against
  // SharedRuntime::deopt_blob()->unpack() expectations.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2083 
2084 // REQUIRED MATCHER CODE
2085 
2086 //=============================================================================
2087 
2088 const bool Matcher::match_rule_supported(int opcode) {
2089   if (!has_match_rule(opcode))
2090     return false;
2091 
2092   bool ret_value = true;
2093   switch (opcode) {
2094     case Op_CacheWB:
2095     case Op_CacheWBPreSync:
2096     case Op_CacheWBPostSync:
2097       if (!VM_Version::supports_data_cache_line_flush()) {
2098         ret_value = false;
2099       }
2100       break;
2101   }
2102 
2103   return ret_value; // Per default match rules are supported.
2104 }
2105 
2106 // Identify extra cases that we might want to provide match rules for vector nodes and
2107 // other intrinsics guarded with vector length (vlen) and element type (bt).
2108 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
2109   if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
2110     return false;
2111   }
2112 
2113   // Special cases which require vector length
2114   switch (opcode) {
2115     case Op_MulAddVS2VI: {
2116       if (vlen != 4) {
2117         return false;
2118       }
2119       break;
2120     }
2121     case Op_VectorLoadShuffle:
2122     case Op_VectorRearrange:
2123       if (vlen < 4) {
2124         return false;
2125       }
2126       break;
2127   }
2128 
2129   return true; // Per default match rules are supported.
2130 }
2131 
// No predicated (masked) vector support in this configuration.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2135 
// Per-lane variable shift counts are supported.
bool Matcher::supports_vector_variable_shifts(void) {
  return true;
}
2139 
// Register-pressure threshold for floats; no platform adjustment needed.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2143 
// Not used on this platform (x87-style FPU stack concept does not apply).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2149 
2150 // Is this branch offset short enough that a short branch can be used?
2151 //
2152 // NOTE: If the platform does not provide any short branch variants, then
2153 //       this method should return false for offset 0.
2154 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2155   // The passed offset is relative to address of the branch.
2156 
2157   return (-32768 <= offset && offset < 32768);
2158 }
2159 
// Is a 64-bit constant cheap to materialize?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2165 
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2170 
2171 // Vector width in bytes.
2172 const int Matcher::vector_width_in_bytes(BasicType bt) {
2173   int size = MIN2(16,(int)MaxVectorSize);
2174   // Minimum 2 values in vector
2175   if (size < 2*type2aelembytes(bt)) size = 0;
2176   // But never < 4
2177   if (size < 4) size = 0;
2178   return size;
2179 }
2180 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2185 const int Matcher::min_vector_size(const BasicType bt) {
2186   int max_size = max_vector_size(bt);
2187   // Limit the vector size to 8 bytes
2188   int size = 8 / type2aelembytes(bt);
2189   if (bt == T_BYTE) {
2190     // To support vector api shuffle/rearrange.
2191     size = 4;
2192   } else if (bt == T_BOOLEAN) {
2193     // To support vector api load/store mask.
2194     size = 2;
2195   }
2196   if (size < 2) size = 2;
2197   return MIN2(size,max_size);
2198 }
2199 
2200 // Vector ideal reg.
2201 const uint Matcher::vector_ideal_reg(int len) {
2202   switch(len) {
2203     // For 16-bit/32-bit mask vector, reuse VecD.
2204     case  2:
2205     case  4:
2206     case  8: return Op_VecD;
2207     case 16: return Op_VecX;
2208   }
2209   ShouldNotReachHere();
2210   return 0;
2211 }
2212 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2217 
// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
2222 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;
2225 
// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
2231 
// Extra cost of a float conditional move relative to an int one: none.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2236 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// NOTE(review): false implies the hardware masks the count itself -- see
// the A64 variable-shift instruction semantics.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
2246 
// Unreachable: generic vector operands are disabled on this platform
// (see supports_generic_vector_operands above).
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}
2251 
// Unreachable: generic vector operands are disabled on this platform.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2256 
// Unreachable: generic vector operands are disabled on this platform.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2261 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable when decoding is a no-op (zero shift).
  return CompressedOops::shift() == 0;
}
2275 
// Same question as above, but for narrow klass pointers.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2281 
// Should constant oops be materialized as ConN+DecodeN rather than ConP?
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}
2286 
// Should constant klasses be materialized as ConNKlass+DecodeNKlass?
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
2291 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
2298 
// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
2304 
// Not used on AArch64 (the "No-op on amd64" comment was a copy-paste
// leftover from the x86 port; this path is never taken here).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2309 
// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2312 
// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }
2316 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2322 
2323 // Return whether or not this register is ever used as an argument.
2324 // This function is used on startup to build the trampoline stubs in
2325 // generateOptoStub.  Registers not mentioned will be killed by the VM
2326 // call in the trampoline, and arguments in those registers not be
2327 // available to the callee.
2328 bool Matcher::can_be_java_arg(int reg)
2329 {
2330   return
2331     reg ==  R0_num || reg == R0_H_num ||
2332     reg ==  R1_num || reg == R1_H_num ||
2333     reg ==  R2_num || reg == R2_H_num ||
2334     reg ==  R3_num || reg == R3_H_num ||
2335     reg ==  R4_num || reg == R4_H_num ||
2336     reg ==  R5_num || reg == R5_H_num ||
2337     reg ==  R6_num || reg == R6_H_num ||
2338     reg ==  R7_num || reg == R7_H_num ||
2339     reg ==  V0_num || reg == V0_H_num ||
2340     reg ==  V1_num || reg == V1_H_num ||
2341     reg ==  V2_num || reg == V2_H_num ||
2342     reg ==  V3_num || reg == V3_H_num ||
2343     reg ==  V4_num || reg == V4_H_num ||
2344     reg ==  V5_num || reg == V5_H_num ||
2345     reg ==  V6_num || reg == V6_H_num ||
2346     reg ==  V7_num || reg == V7_H_num;
2347 }
2348 
// Argument registers are the only registers the allocator may spill args from.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2353 
// No hand-written assembly path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2357 
// Register for DIVI projection of divmodI.  Not used: there is no
// combined div/mod instruction form on this platform.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2362 
// Register for MODI projection of divmodI.  Not used on this platform.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2368 
// Register for DIVL projection of divmodL.  Not used on this platform.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2374 
// Register for MODL projection of divmodL.  Not used on this platform.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2380 
// SP is saved in FP across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2384 
2385 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2386   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2387     Node* u = addp->fast_out(i);
2388     if (u->is_Mem()) {
2389       int opsize = u->as_Mem()->memory_size();
2390       assert(opsize > 0, "unexpected memory operand size");
2391       if (u->as_Mem()->memory_size() != (1<<shift)) {
2392         return false;
2393       }
2394     }
2395   }
2396   return true;
2397 }
2398 
// The matcher does not require ConvI2L nodes to carry a precise type.
const bool Matcher::convi2l_type_required = false;
2400 
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone constant shift counts so they can fold into the vector shift.
  if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
    mstack.push(m, Visit);           // m = ShiftCntV
    return true;
  }
  return false;
}
2409 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Returns true when the AddP's sub-expressions have been pushed onto
// 'mstack' for cloning into the addressing mode; the push order below
// is significant for the matcher's traversal.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // Handle (AddP base (LShiftL (ConvI2L idx) con)) and
  // (AddP base (ConvI2L idx)) shapes, which map onto scaled /
  // sign-extended register-offset addressing.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2450 
// No platform-specific AddP reshaping is needed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2453 
2454 
// Emit a volatile (acquire/release) move.  Volatile accesses only
// permit a plain base register -- no index, scale or displacement --
// which the guarantees below enforce at code-generation time.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  C2_MacroAssembler _masm(&cbuf);                                       \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2463 
2464 
2465 static Address mem2address(int opcode, Register base, int index, int size, int disp)
2466   {
2467     Address::extend scale;
2468 
2469     // Hooboy, this is fugly.  We need a way to communicate to the
2470     // encoder that the index needs to be sign extended, so we have to
2471     // enumerate all the cases.
2472     switch (opcode) {
2473     case INDINDEXSCALEDI2L:
2474     case INDINDEXSCALEDI2LN:
2475     case INDINDEXI2L:
2476     case INDINDEXI2LN:
2477       scale = Address::sxtw(size);
2478       break;
2479     default:
2480       scale = Address::lsl(size);
2481     }
2482 
2483     if (index == -1) {
2484       return Address(base, disp);
2485     } else {
2486       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2487       return Address(base, as_Register(index), scale);
2488     }
2489   }
2490 
2491 
// Pointer-to-member types for the MacroAssembler load/store emitters
// used by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2497 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Emits 'insn' for a general-purpose register against the decoded
  // memory operand, legitimizing base+offset forms through rscratch1
  // when the immediate offset would be out of range.
  static void loadStore(C2_MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(addr.offset(), exact_log2(size_in_memory)),
             "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm.*insn)(reg, addr);
  }
2519 
  // Float-register variant of loadStore().  Decodes the addressing mode
  // inline (rather than via mem2address) and legitimizes out-of-range
  // base+offset forms through rscratch1.
  static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    // I2L opcodes require a sign-extended index register.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      /* If we get an out-of-range offset it is a bug in the compiler,
         so we assert here. */
      assert(Address::offset_ok_for_immed(disp, exact_log2(size_in_memory)), "c2 compiler bug");
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
      (masm.*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2550 
  // SIMD/vector variant of loadStore().
  // NOTE(review): unlike the scalar variants above, this one does not
  // legitimize out-of-range base+offset addresses -- presumably vector
  // spill offsets are always in range; confirm at call sites.
  static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2562 
2563 %}
2564 
2565 
2566 
2567 //----------ENCODING BLOCK-----------------------------------------------------
2568 // This block specifies the encoding classes used by the compiler to
2569 // output byte streams.  Encoding classes are parameterized macros
2570 // used by Machine Instruction Nodes in order to generate the bit
2571 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
2575 // which returns its register number when queried.  CONST_INTER causes
2576 // an operand to generate a function which returns the value of the
2577 // constant when queried.  MEMORY_INTER causes an operand to generate
2578 // four functions which return the Base Register, the Index Register,
2579 // the Scale Value, and the Offset Value of the operand when queried.
2580 // COND_INTER causes an operand to generate six functions which return
2581 // the encoding code (ie - encoding bits for the instruction)
2582 // associated with each basic boolean condition for a conditional
2583 // instruction.
2584 //
2585 // Instructions specify two basic values for encoding.  Again, a
2586 // function is available to check if the constant displacement is an
2587 // oop. They use the ins_encode keyword to specify their encoding
2588 // classes (which must be a sequence of enc_class names, and their
2589 // parameters, specified in the encoding block), and they use the
2590 // opcode keyword to specify, in order, their primary, secondary, and
2591 // tertiary opcode.  Only the opcode sections which a particular
2592 // instruction needs for encoding need to be specified.
2593 encode %{
2594   // Build emit functions for each basic byte or larger field in the
  // instruction encoding scheme, and call them
2596   // from C++ code in the enc_class source block.  Emit functions will
2597   // live in the main source block for now.  In future, we can
2598   // generalize this by adding a syntax that specifies the sizes of
2599   // fields in an order, so that the adlc can build the emit functions
2600   // automagically
2601 
2602   // catch all for unimplemented encodings
2603   enc_class enc_unimplemented %{
2604     C2_MacroAssembler _masm(&cbuf);
2605     __ unimplemented("C2 catch all");
2606   %}
2607 
  // BEGIN Non-volatile memory access
  //
  // Each encoding class below emits one plain (non-acquire/release)
  // load or store.  They all delegate to the loadStore() helper defined
  // in the source block above, passing the memory operand's
  // base/index/scale/disp components plus the access size in bytes;
  // the helper uses that size to select a legal addressing mode.
  // The memoryN operand type names the maximum access size the
  // operand's offset was validated for.
  //
  // NOTE(review): these classes are generated from ad_encode.m4 -- per
  // the markers below, change the m4 source, not this section.

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0(memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strh0(memory2 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw0(memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_str0(memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    if (con) __ encode_heap_oop_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    address con = (address)$src$$constant;
    // need to do this the hard way until we can manage relocs
    // for 32 bit constants
    __ movoop(rscratch2, (jobject)con);
    __ encode_klass_not_null(rscratch2);
    loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
  %}

  // This encoding class is generated automatically from ad_encode.m4.
  // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
  enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
      C2_MacroAssembler _masm(&cbuf);
      __ membar(Assembler::StoreStore);
      loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
  %}

  // END Non-volatile memory access
2846 
  // Vector loads and stores
  //
  // These delegate to the loadStore() overload that takes a
  // SIMD_RegVariant: H/S/D/Q select the 2-, 4-, 8- or 16-byte form of
  // the FP/SIMD register.  As with the scalar classes above, the
  // helper picks the addressing mode from base/index/scale/disp.
  enc_class aarch64_enc_ldrvH(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvH(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2895 
  // volatile loads and stores
  //
  // These emit acquire/release forms: stlr* (store-release) for the
  // stores and ldar* (load-acquire) for the loads, via the
  // MOV_VOLATILE macro (presumably defined earlier in this file --
  // not visible in this section).  The sub-word signed loads have no
  // load-acquire form with sign extension, so they emit ldarb/ldarh
  // followed by an explicit sxtb*/sxth* on the destination register.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    // ldarb zero-extends; sign-extend the byte into the 32-bit result
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    // ldarb zero-extends; sign-extend the byte into the 64-bit result
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    // ldarh zero-extends; sign-extend the halfword into the 32-bit result
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    // ldarh zero-extends; sign-extend the halfword into the 64-bit result
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Float/double volatile loads bounce through an integer scratch
  // register: load-acquire into rscratch1, then fmov to the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      C2_MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Float/double volatile stores mirror the loads: fmov the FP value
  // into rscratch2, then store-release from there.  The inner braces
  // scope this _masm separately -- NOTE(review): presumably because
  // MOV_VOLATILE declares its own; confirm against the macro.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      C2_MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
3022 
  // synchronized read/update encodings

  // Emits a 64-bit load-acquire-exclusive (ldaxr) of the memory
  // operand.  ldaxr only takes a bare base register, so any
  // displacement and/or index is first materialized into rscratch1
  // with lea before the exclusive load.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale) needs two lea steps since
        // Address cannot express all three components at once
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3053 
  // Emits a 64-bit store-release-exclusive (stlxr) of src to the
  // memory operand, materializing the address into rscratch2 first
  // when it has a displacement and/or index (stlxr takes only a bare
  // base register).  The exclusive-store status lands in rscratch1
  // (0 on success); the trailing cmpw sets the condition flags so a
  // following branch/cset can test the outcome.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + (index << scale) needs two lea steps since
        // Address cannot express all three components at once
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3083 
  // Compare-and-exchange encodings.  All of them require a bare-base
  // memory operand (no index, no displacement -- enforced by the
  // guarantee) and delegate to MacroAssembler::cmpxchg with the
  // operand size given by the Assembler size constant.  The plain
  // variants are release-only; the _acq variants below add acquire.
  // noreg as the result register means only the flags are set.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    C2_MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // auxiliary used for CompareAndSwapX to set result register
  // Materializes the flags left by a preceding cmpxchg/stlxr into a
  // 0/1 boolean result (1 iff EQ, i.e. the CAS succeeded).
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    C2_MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3159 
  // prefetch encodings

  // Emits a prfm with the PSTL1KEEP hint (prefetch for store, L1,
  // retain in cache) for the memory operand.  prfm's Address forms
  // cannot combine base + disp + scaled index, so that case leas the
  // base+disp into rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    C2_MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3180 
  // mov encodings
3182 
  // Load a 32-bit immediate into an integer register; zero goes via
  // the zero register, everything else via movw's immediate expansion.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    C2_MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate into an integer register; zero goes via
  // the zero register, everything else via mov's immediate expansion.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Load a pointer constant, choosing the form by its relocation type:
  // oops via movoop, metadata via mov_metadata, and unrelocated
  // addresses either as a plain immediate (low addresses, below the VM
  // page size) or as adrp+add for reach.  NULL and the (address)1
  // sentinel are handled by separate encodings (mov_p0/mov_p1 below).
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Null pointer constant: just clear the register.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // The (address)1 sentinel pointer constant.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Load the GC card table's byte map base address.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    C2_MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Load a narrow (compressed) oop constant with its relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow null: just clear the register.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Load a narrow (compressed) klass constant with its relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3278 
3279   // arithmetic encodings
3280 
  // 32-bit add/subtract of an immediate.  Subtraction is implemented
  // by negating the constant; the immIAddSub operand restricts the
  // constant to the AArch64 add/sub immediate range, so the negation
  // is presumed not to overflow — confirm against the operand's
  // predicate if it is ever widened.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      // A negative immediate is encoded as a subtract of its magnitude.
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
3294 
  // 64-bit add/subtract of an immediate.  Note the long constant is
  // narrowed to int32_t: the immLAddSub operand is assumed to restrict
  // it to the add/sub immediate range so the cast is value-preserving
  // — confirm against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3308 
3309   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3310     C2_MacroAssembler _masm(&cbuf);
3311    Register dst_reg = as_Register($dst$$reg);
3312    Register src1_reg = as_Register($src1$$reg);
3313    Register src2_reg = as_Register($src2$$reg);
3314     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3315   %}
3316 
3317   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3318     C2_MacroAssembler _masm(&cbuf);
3319    Register dst_reg = as_Register($dst$$reg);
3320    Register src1_reg = as_Register($src1$$reg);
3321    Register src2_reg = as_Register($src2$$reg);
3322     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3323   %}
3324 
3325   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3326     C2_MacroAssembler _masm(&cbuf);
3327    Register dst_reg = as_Register($dst$$reg);
3328    Register src1_reg = as_Register($src1$$reg);
3329    Register src2_reg = as_Register($src2$$reg);
3330     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3331   %}
3332 
3333   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3334     C2_MacroAssembler _masm(&cbuf);
3335    Register dst_reg = as_Register($dst$$reg);
3336    Register src1_reg = as_Register($src1$$reg);
3337    Register src2_reg = as_Register($src2$$reg);
3338     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3339   %}
3340 
3341   // compare instruction encodings
3342 
  // 32-bit register-register compare; sets the condition flags.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}
3349 
  // 32-bit compare against an add/sub-range immediate, encoded as a
  // flag-setting subtract/add into the zero register: both arms
  // compute reg - val for the flags.  immIAddSub is assumed to keep
  // -val representable — confirm against the operand's predicate.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
3360 
3361   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3362     C2_MacroAssembler _masm(&cbuf);
3363     Register reg1 = as_Register($src1$$reg);
3364     u_int32_t val = (u_int32_t)$src2$$constant;
3365     __ movw(rscratch1, val);
3366     __ cmpw(reg1, rscratch1);
3367   %}
3368 
  // 64-bit register-register compare; sets the condition flags.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}
3375 
3376   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
3377     C2_MacroAssembler _masm(&cbuf);
3378     Register reg = as_Register($src1$$reg);
3379     int64_t val = $src2$$constant;
3380     if (val >= 0) {
3381       __ subs(zr, reg, val);
3382     } else if (val != -val) {
3383       __ adds(zr, reg, -val);
3384     } else {
3385     // aargh, Long.MIN_VALUE is a special case
3386       __ orr(rscratch1, zr, (u_int64_t)val);
3387       __ subs(zr, reg, rscratch1);
3388     }
3389   %}
3390 
3391   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3392     C2_MacroAssembler _masm(&cbuf);
3393     Register reg1 = as_Register($src1$$reg);
3394     u_int64_t val = (u_int64_t)$src2$$constant;
3395     __ mov(rscratch1, val);
3396     __ cmp(reg1, rscratch1);
3397   %}
3398 
  // Pointer (64-bit) register-register compare.
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}
3405 
  // Narrow-oop compare: compressed oops are 32 bits, so use cmpw.
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}
3412 
  // Test a pointer against NULL (compare with the zero register).
  enc_class aarch64_enc_testp(iRegP src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}
3418 
  // Test a narrow oop against NULL (32-bit compare with zero).
  enc_class aarch64_enc_testn(iRegN src) %{
    C2_MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3424 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}
3430 
  // Conditional branch; the condition code comes from the cmpOp
  // operand's encoding.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3436 
  // Conditional branch for unsigned comparisons; identical emission to
  // aarch64_enc_br_con — the cmpOpU operand supplies unsigned
  // condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3442 
  // Slow-path subtype check: scans sub's secondary supers for super.
  // Passing NULL as the success label makes the slow path fall through
  // on success; on failure control reaches the miss label.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     C2_MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     // NOTE(review): when $primary is set, result is zeroed on the
     // success (fall-through) path — presumably the variant that
     // returns a boolean result; confirm against the matching instructs.
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3460 
  // Emit a Java static call.  Calls to runtime wrappers (no _method)
  // use a plain runtime-call relocation; real Java targets get an
  // opt-virtual or static call relocation plus a to-interpreter stub.
  // A NULL return from trampoline_call or emit_to_interp_stub means
  // the code cache is full; record a bail-out instead of emitting.
  enc_class aarch64_enc_java_static_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3487 
  // Emit a Java dynamic (inline-cache) call.  A NULL return from
  // ic_call means the code cache is full; record a bail-out.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    C2_MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3497 
  // Post-call epilogue.  The VerifyStackAtCalls check is not
  // implemented on AArch64 yet — it traps via call_Unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    C2_MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3505 
  // Call from compiled Java into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    C2_MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: load the absolute address
      // and blr, leaving the return pc on the stack for the runtime.
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // Pop the two breadcrumb words pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3532 
  // Jump to the rethrow stub (far_jump: target may be out of branch range).
  enc_class aarch64_enc_rethrow() %{
    C2_MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3537 
  // Return to the caller via the link register.
  enc_class aarch64_enc_ret() %{
    C2_MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3542 
  // Tail call: indirect jump to the target register.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3548 
  // Tail jump used for exception forwarding: pass the return address
  // to the callee in r3 before jumping.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    C2_MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3558 
  // Fast-path monitor enter.  Tries biased locking (if enabled), then
  // a stack-lock CAS on the object's markWord, then recursive
  // stack-lock detection, then a CAS on the inflated monitor's owner.
  // The condition flags communicate the outcome (EQ = success).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
    
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3636 
  // Fast-path monitor exit: undo biased/stack locking, or release the
  // inflated monitor when it has no recursions and no waiters.  The
  // condition flags communicate the outcome (EQ = success).
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    C2_MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    // No recursions and we own it: succeed only if nothing is queued.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned
    
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3697 
3698 %}
3699 
3700 //----------FRAME--------------------------------------------------------------
3701 // Definition of frame structure and management information.
3702 //
3703 //  S T A C K   L A Y O U T    Allocators stack-slot number
3704 //                             |   (to get allocators register number
3705 //  G  Owned by    |        |  v    add OptoReg::stack0())
3706 //  r   CALLER     |        |
3707 //  o     |        +--------+      pad to even-align allocators stack-slot
3708 //  w     V        |  pad0  |        numbers; owned by CALLER
3709 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3710 //  h     ^        |   in   |  5
3711 //        |        |  args  |  4   Holes in incoming args owned by SELF
3712 //  |     |        |        |  3
3713 //  |     |        +--------+
3714 //  V     |        | old out|      Empty on Intel, window on Sparc
3715 //        |    old |preserve|      Must be even aligned.
3716 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3717 //        |        |   in   |  3   area for Intel ret address
3718 //     Owned by    |preserve|      Empty on Sparc.
3719 //       SELF      +--------+
3720 //        |        |  pad2  |  2   pad to align old SP
3721 //        |        +--------+  1
3722 //        |        | locks  |  0
3723 //        |        +--------+----> OptoReg::stack0(), even aligned
3724 //        |        |  pad1  | 11   pad to align new SP
3725 //        |        +--------+
3726 //        |        |        | 10
3727 //        |        | spills |  9   spills
3728 //        V        |        |  8   (pad0 slot for callee)
3729 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3730 //        ^        |  out   |  7
3731 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3732 //     Owned by    +--------+
3733 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3734 //        |    new |preserve|      Must be even-aligned.
3735 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3736 //        |        |        |
3737 //
3738 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3739 //         known from SELF's arguments and the Java calling convention.
3740 //         Region 6-7 is determined per call site.
3741 // Note 2: If the calling convention leaves holes in the incoming argument
3742 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3744 //         incoming area, as the Java calling convention is completely under
3745 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3747 //         varargs C calling conventions.
3748 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3749 //         even aligned with pad0 as needed.
3750 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3751 //           (the latter is true on Intel but is it false on AArch64?)
3752 //         region 6-11 is even aligned; it may be padded out more so that
3753 //         the region from SP to FP meets the minimum stack alignment.
3754 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3755 //         alignment.  Region 11, pad1, may be dynamically extended so that
3756 //         SP meets the minimum alignment.
3757 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low halves: integral/narrow results in r0, FP results in v0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High halves: OptoReg::Bad marks single-slot (32-bit) results.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3861 
3862 //----------ATTRIBUTES---------------------------------------------------------
3863 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3879 
3880 //----------OPERANDS-----------------------------------------------------------
3881 // Operand definitions must precede instruction definitions for correct parsing
3882 // in the ADLC because operands constitute user defined types which are used in
3883 // instruction definitions.
3884 
3885 //----------Simple Operands----------------------------------------------------
3886 
// Integer operands 32 bit
// 32 bit immediate
// (unconstrained: matches any ConI node)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3897 
// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3908 
// 32 bit unit increment (the constant 1)
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3919 
// 32 bit unit decrement (the constant -1)
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3930 
// Shift values for add/sub extension shift
// (0..4 inclusive, the range allowed by extended-register add/sub)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3941 
// 32 bit constant no greater than 4 (note: no lower bound, unlike immIExt)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3951 
// The 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3961 
// The 32 bit constant 2
operand immI_2()
%{
  predicate(n->get_int() == 2);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3971 
// The 32 bit constant 4
operand immI_4()
%{
  predicate(n->get_int() == 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3981 
// The 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3991 
// The 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4001 
// The 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4011 
// The 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4021 
// The 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4031 
// The 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4041 
// The 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4051 
// The 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4061 
// The 32 bit constant 255 (low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4071 
// The 32 bit constant 65535 (low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4081 
// The 64 bit constant 255 (low-byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4091 
// The 64 bit constant 65535 (low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4101 
// The 64 bit constant 4294967295 == 0xffffffff (low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4111 
// 64 bit mask of the form 2^k - 1 (contiguous low-order ones) with the
// top two bits clear, i.e. 1 <= k <= 62.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4123 
// 32 bit mask of the form 2^k - 1 (contiguous low-order ones) with the
// top two bits clear, i.e. 1 <= k <= 30.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4135 
// Scale values for scaled offset addressing modes (up to long but not quad)
// (shift amounts 0..3, i.e. element sizes of 1 to 8 bytes)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4146 
// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4157 
// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4168 
4169 // 12 bit unsigned offset -- for base plus immediate loads
4170 operand immIU12()
4171 %{
4172   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
4173   match(ConI);
4174 
4175   op_cost(0);
4176   format %{ %}
4177   interface(CONST_INTER);
4178 %}
4179 
4180 operand immLU12()
4181 %{
4182   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
4183   match(ConL);
4184 
4185   op_cost(0);
4186   format %{ %}
4187   interface(CONST_INTER);
4188 %}
4189 
4190 // Offset for scaled or unscaled immediate loads and stores
4191 operand immIOffset()
4192 %{
4193   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
4194   match(ConI);
4195 
4196   op_cost(0);
4197   format %{ %}
4198   interface(CONST_INTER);
4199 %}
4200 
4201 operand immIOffset1()
4202 %{
4203   predicate(Address::offset_ok_for_immed(n->get_int(), 0));
4204   match(ConI);
4205 
4206   op_cost(0);
4207   format %{ %}
4208   interface(CONST_INTER);
4209 %}
4210 
4211 operand immIOffset2()
4212 %{
4213   predicate(Address::offset_ok_for_immed(n->get_int(), 1));
4214   match(ConI);
4215 
4216   op_cost(0);
4217   format %{ %}
4218   interface(CONST_INTER);
4219 %}
4220 
4221 operand immIOffset4()
4222 %{
4223   predicate(Address::offset_ok_for_immed(n->get_int(), 2));
4224   match(ConI);
4225 
4226   op_cost(0);
4227   format %{ %}
4228   interface(CONST_INTER);
4229 %}
4230 
4231 operand immIOffset8()
4232 %{
4233   predicate(Address::offset_ok_for_immed(n->get_int(), 3));
4234   match(ConI);
4235 
4236   op_cost(0);
4237   format %{ %}
4238   interface(CONST_INTER);
4239 %}
4240 
4241 operand immIOffset16()
4242 %{
4243   predicate(Address::offset_ok_for_immed(n->get_int(), 4));
4244   match(ConI);
4245 
4246   op_cost(0);
4247   format %{ %}
4248   interface(CONST_INTER);
4249 %}
4250 
// Long offset for scaled or unscaled immediate loads and stores;
// long-constant counterparts of the immIOffset* operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 1-byte access (shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 2-byte (halfword) access (shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte (word) access (shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte (doubleword) access (shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte (quadword) access (shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4310 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  // Cast via int64_t, not "long": long is only 32 bits on LLP64 targets
  // (e.g. Windows), which would fail to sign-extend the value to the
  // 64-bit parameter consistently across platforms.
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4320 
// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  // Cast via uint64_t, not "unsigned long": unsigned long is only 32 bits
  // on LLP64 targets (e.g. Windows), which would zero-extend instead of
  // producing the sign-extended 64-bit value seen on LP64 platforms.
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4332 
// Integer operands 64 bit
// 64 bit immediate -- any long constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

// Matches exactly the byte offset of last_Java_pc within the current
// JavaThread (frame anchor offset plus pc offset within the anchor).
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4399 
// 64 bit integer valid for logical immediate
operand immLLog()
%{
  // Cast via uint64_t, not "unsigned long": unsigned long is only 32 bits
  // on LLP64 targets (e.g. Windows) and would truncate the 64-bit value.
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4409 
// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map; only matches when a card-table barrier set is in
  // use and the constant is exactly its byte_map_base.
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4490 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  // Compare the raw bit pattern so only +0.0d (all bits zero) matches;
  // -0.0d has the sign bit set and is rejected.
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate representable in the floating-point instructions'
// packed immediate encoding (as accepted by
// Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  // Raw bit-pattern compare: only +0.0f matches, -0.0f is rejected.
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate representable in the floating-point instructions'
// packed immediate encoding (widened to double for validation).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4551 
// Narrow pointer operands
// Narrow Pointer Immediate -- any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass pointer constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4582 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4659 
// Fixed-register pointer operands: each pins allocation to a single
// general-purpose register, typically to satisfy calling conventions or
// stub/runtime-call register contracts.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4743 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only (the frame pointer)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4798 
// Integer (32 bit) Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer (32 bit) Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer (32 bit) Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer (32 bit) Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4844 
// Narrow Pointer Register Operands
// Narrow Pointer Register (compressed oop in a 32-bit register)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer (32 bit) Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4904 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4948 
// Double operands pinned to a specific FP/SIMD register (V0..V31), one
// operand per register; used where a rule must name an exact vector
// register (e.g. runtime-call and stub register contracts).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5236 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5276 
// Special Registers

// Method Register -- pointer constrained to the inline-cache register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method-oop register
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register -- pointer constrained to the current-thread register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link register (LR)
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5318 
//----------Memory Operands----------------------------------------------------

// [reg] -- base register only, no index, no displacement.
// index(0xffffffff) is the ADL encoding for "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}
5334 
// [reg + sign-extended(ireg) << scale] -- scaled 32-bit index; the
// predicate checks the scale matches the access size of every memory
// use of this address expression.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + lreg << scale] -- scaled 64-bit index
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + sign-extended(ireg)] -- unscaled 32-bit index
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + lreg] -- unscaled 64-bit index
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5392 
// [reg + off] -- base plus int immediate offset; the indOffI<N> variants
// differ only in the access size their offset operand is validated for.
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 1-byte accesses
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 2-byte accesses
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 4-byte accesses
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 8-byte accesses
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 16-byte accesses
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5476 
// [reg + off] -- base plus long immediate offset; long-constant
// counterparts of the indOffI* operands above.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- long offset valid for 1-byte accesses
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- long offset valid for 2-byte accesses
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- long offset valid for 4-byte accesses
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- long offset valid for 8-byte accesses
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- long offset valid for 16-byte accesses
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5560 
// Indirect memory operand whose base is a narrow oop (compressed
// pointer). The match consumes the DecodeN node, so it is only legal
// when CompressedOops::shift() == 0, i.e. when decoding is free enough
// that the narrow value can be used directly as the base address.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a sign-extended (sxtw, per the format string)
// int index shifted by a constant scale. size_fits_all_mem_uses
// (defined elsewhere) vets the scale against every memory user of the
// AddP before the operand is accepted.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a long index shifted by a constant scale;
// same scale vetting as indIndexScaledI2LN above.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
5605 
// Narrow-oop base plus an unscaled, sign-extended int index.
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus an unscaled long index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus an int constant offset (immIOffset, defined
// elsewhere).
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop base plus a long constant offset (immLoffset, defined
// elsewhere).
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5665 
5666 
5667 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// Memory operand for that slot: the thread register (thread_RegP) as
// base plus the immL_pc_off constant (both defined elsewhere).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5682 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// All five variants encode the same address shape: base 0x1e (RSP, per
// the inline comments), no index, no scale, and the stack offset of the
// allocated slot as displacement. None has a match rule; they are
// generated only during matching.
// NOTE(review): stackSlotP carries op_cost(100) while the other four
// variants specify no cost -- confirm whether that asymmetry is
// intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for an int value.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for a float value.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for a double value.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for a long value.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5757 
5758 // Operands for expressing Control Flow
5759 // NOTE: Label is a predefined operand which should not be redefined in
5760 //       the AD file. It is generically handled within the ADLC.
5761 
5762 //----------Conditional Branch Operands----------------------------------------
5763 // Comparison Op  - This is the operation of the comparison, and is limited to
5764 //                  the following set of codes:
5765 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5766 //
5767 // Other attributes of the comparison, such as unsignedness, are specified
5768 // by the comparison instruction that sets a condition code flags register.
5769 // That result is represented by a flags operand whose subtype is appropriate
5770 // to the unsignedness (etc.) of the comparison.
5771 //
5772 // Later, the instruction which matches both the Comparison Op (a Bool) and
5773 // the flags (produced by the Cmp) specifies the coding of the comparison op
5774 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5775 
// used for signed integral comparisons and fp comparisons
//
// The hex value attached to each arm of the COND_INTER is the AArch64
// condition-code encoding for the mnemonic shown alongside it
// (eq=0x0, ne=0x1, ge=0xa, lt=0xb, gt=0xc, le=0xd, vs=0x6, vc=0x7).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
//
// Same encoding scheme as cmpOp, but the ordering arms use the
// unsigned condition codes (lo=0x3, hs=0x2, ls=0x9, hi=0x8).

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
//
// Identical encodings to cmpOp; the predicate restricts matching to
// eq/ne tests so the consuming instruct rules can emit compare-and-
// branch forms.

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
//
// As cmpOpEqNe, but restricted to lt/ge tests.

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
//
// Union of the cmpOpEqNe and cmpOpLtGe predicates: eq/ne/lt/ge.
// NOTE(review): despite the U in the name, the arm encodings here are
// the signed ones (lt=0xb etc.), unlike cmpOpU -- confirm intended.

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5886 
// Special operand allowing long args to int ops to be truncated for free
// Matches a ConvL2I of a long register and presents it through
// REG_INTER as an ordinary register, so no truncation code is emitted.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5899 
// Memory operand classes for vector loads/stores, keyed by access size
// (4/8/16 bytes) via the matching indOffI*/indOffL* offset operands.
// NOTE(review): unlike the memory* classes below, these include no
// narrow-oop (…N) operands -- confirm whether that is intentional.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5903 
5904 //----------OPERAND CLASSES----------------------------------------------------
5905 // Operand Classes are groups of operands that are used as to simplify
5906 // instruction definitions by not requiring the AD writer to specify
5907 // separate instructions for every form of operand when the
5908 // instruction accepts multiple operand types with the same basic
5909 // encoding and format. The classic case of this is memory operands.
5910 
5911 // memory is used to define read/write location for load/store
5912 // instruction defs. we can turn a memory op into an Address
5913 
// Scalar memory operand classes, keyed by access size (1/2/4/8 bytes)
// via the indOffI*/indOffL* offset operands each class admits.
// Observable difference: memory1 and memory2 omit the narrow constant-
// offset operands indOffIN/indOffLN that memory4 and memory8 include.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
5930 
5931 
5932 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5933 // operations. it allows the src to be either an iRegI or a (ConvL2I
5934 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5935 // can be elided because the 32-bit instruction will just employ the
5936 // lower 32 bits anyway.
5937 //
5938 // n.b. this does not elide all L2I conversions. if the truncated
5939 // value is consumed by more than one operation then the ConvL2I
5940 // cannot be bundled into the consuming nodes so an l2i gets planted
5941 // (actually a movw $dst $src) and the downstream instructions consume
5942 // the result of the l2i as an iRegI input. That's a shame since the
5943 // movw is actually redundant but its not too costly.
5944 
// Source operand class: either a plain int register or a free long
// truncation (see the comment block above).
opclass iRegIorL2I(iRegI, iRegL2I);
5946 
5947 //----------PIPELINE-----------------------------------------------------------
5948 // Rules which define the behavior of the target architectures pipeline.
5949 
5950 // For specific pipelines, eg A53, define the stages of that pipeline
5951 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) used in
// the pipe_class definitions below onto the generic stages S0-S3
// declared by pipe_desc.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5956 
5957 // Integer ALU reg operation
5958 pipeline %{
5959 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // All instructions are fixed size
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5972 
5973 // We don't use an actual pipeline model so don't care about resources
5974 // or description. we do use pipeline classes to introduce fixed
5975 // latencies
5976 
5977 //----------RESOURCES----------------------------------------------------------
5978 // Resources are the functional units available to the machine
5979 
// Functional units. INS01 means "either issue slot" and ALU "either
// ALU" (they are the unions INS0|INS1 and ALU0|ALU1); pipe_classes that
// name INS0 alone can only issue in slot 0.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5987 
5988 //----------PIPELINE DESCRIPTION-----------------------------------------------
5989 // Pipeline Description specifies the stages in the machine's pipeline
5990 
5991 // Define the pipeline as a generic 6 stage pipeline
// Six generic stages; ISS/EX1/EX2/WR above alias S0-S3.
pipe_desc(S0, S1, S2, S3, S4, S5);
5993 
5994 //----------PIPELINE CLASSES---------------------------------------------------
5995 // Pipeline Classes describe the stages in which input and output are
5996 // referenced by the hardware pipeline.
5997 
// FP two-operand op, single precision: operands read at S1/S2, result
// written at S5 on the NEON/FP unit; may issue in either slot (INS01).
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand op, double precision: same timing as the single form.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-operand op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-operand op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6035 
// FP precision/format conversions: source read at S1, result at S5.
// Double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Float -> int (general register destination).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Float -> long (general register destination).
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6071 
// Int -> float.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Double -> int.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Double -> long.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> double.
// NOTE(review): src is typed iRegIorL2I here, whereas fp_l2f above uses
// iRegL -- confirm whether iRegL was intended for this long source.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6125 
// FP divide, single precision. Uses INS0 rather than INS01, i.e. it
// can only occupy issue slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; same constraints as the single form.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
6145 
// FP conditional select, single precision: flags and both sources read
// at S1, result available at S3 (faster than the arithmetic classes).
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate materialization, single precision: no source operands.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate materialization, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
6183 
// FP constant load from the constant pool, single precision: result at
// S4 (one stage later than fp_imm_s).
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6199 
// Vector multiply, 64-bit (D-register) form; dual-issue capable.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (Q-register) form; issue slot 0 only.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit. dst appears as both S5(write)
// and S1(read): the accumulator input is read early, the result is
// produced late.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit; issue slot 0 only.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6241 
// Vector integer two-operand op, 64-bit: result at S4.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer two-operand op, 128-bit; issue slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit: result at S3 (faster than vdop64).
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit; issue slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6281 
// Vector shift by a register shift amount, 64-bit form.
// NOTE(review): the shift operand is typed vecX even though dst/src are
// vecD -- confirm whether vecD was intended (likely cosmetic, since the
// timing is what matters here).
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit; issue slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (shift amount needs no read stage).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit; issue slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6319 
// Vector FP add/sub-style op, 64-bit; dual-issue capable.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP add/sub-style op, 128-bit; issue slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit. Unlike vdop_fp64 this is issue
// slot 0 only (INS0).
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit; issue slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6359 
// Vector FP square root, 128-bit; issue slot 0 only. (No 64-bit
// variant is defined in this file.)
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit; dual-issue capable.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit; issue slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6386 
// Duplicate a general register into all lanes, 64-bit destination.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes, 128-bit destination.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit destination.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit destination.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into both lanes, 128-bit destination.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6431 
// Vector immediate move (MOVI-style), 64-bit destination.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move, 128-bit destination; issue slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
6447 
// Vector load, 64-bit: address consumed at issue, result at S5.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit: data read at S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 128-bit.
// NOTE(review): src is typed vecD although this is the 128-bit variant
// (cf. vload_reg_mem128's vecX) -- confirm whether vecX was intended;
// likely cosmetic since only the stage timings are consumed.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6483 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);   // shifted operand read at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6522 
// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is EX2(write) per the comment, yet the resource
// line is "ALU : EX1" (cf. ialu_reg_reg's "ALU : EX2") -- confirm
// whether EX2 was intended for the ALU occupancy.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}
6549 
// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6581 
//------- Compare operation -------------------------------

// Compare reg-reg, writing only the flags register
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6608 
//------- Conditional instructions ------------------------

// Conditional no operands: only the flags are read
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (single source register plus flags)
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6646 
//------- Multiply pipeline operations --------------------

// 32-bit multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 32-bit multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply (per the fixed_latency comment); as imul_reg_reg plus
// an explicit latency.
// NOTE(review): parameters are typed iRegI despite modeling the 64-bit
// form -- confirm intended; only the timing appears to be consumed.
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6699 
//------- Divide pipeline operations --------------------

// 32-bit divide; occupies the DIV unit, issue slot 0 only.
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide (twice the maximum latency of the 32-bit form).
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6725 
//------- Load pipeline operations ------------------------

// Load - prefetch: address consumed at issue, no destination.
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-addressed form)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6759 
6760 //------- Store pipeline operations -----------------------
6761 
6762 // Store - zr, mem
6763 // Eg.  STR     zr, <mem>
6764 pipe_class istore_mem(memory mem)
6765 %{
6766   single_instruction;
6767   mem    : ISS(read);
6768   INS01  : ISS;
6769   LDST   : WR;
6770 %}
6771 
6772 // Store - reg, mem
6773 // Eg.  STR     x0, <mem>
6774 pipe_class istore_reg_mem(iRegI src, memory mem)
6775 %{
6776   single_instruction;
6777   mem    : ISS(read);
6778   src    : EX2(read);
6779   INS01  : ISS;
6780   LDST   : WR;
6781 %}
6782 
6783 // Store - reg, reg
6784 // Eg. STR      x0, [sp, x1]
6785 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6786 %{
6787   single_instruction;
6788   dst    : ISS(read);
6789   src    : EX2(read);
6790   INS01  : ISS;
6791   LDST   : WR;
6792 %}
6793 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1; // Resolved in the branch unit at EX1
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read); // Flags consumed at branch resolution
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6822 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
6886 
6887 %}
6888 //----------INSTRUCTIONS-------------------------------------------------------
6889 //
6890 // match      -- States which machine-independent subtree may be replaced
6891 //               by this instruction.
6892 // ins_cost   -- The estimated cost of this instruction is used by instruction
6893 //               selection to identify a minimum cost tree of machine
6894 //               instructions that matches a tree of machine-independent
6895 //               instructions.
6896 // format     -- A string providing the disassembly for this instruction.
6897 //               The value of an instruction's operand may be inserted
6898 //               by referring to it with a '$' prefix.
6899 // opcode     -- Three instruction opcodes may be provided.  These are referred
6900 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6902 //               indicate the type of machine instruction, while secondary
6903 //               and tertiary are often used for prefix options or addressing
6904 //               modes.
6905 // ins_encode -- A list of encode classes with parameters. The encode class
6906 //               name must have been defined in an 'enc_class' specification
6907 //               in the encode section of the architecture description.
6908 
6909 // ============================================================================
6910 // Memory (Load/Store) Instructions
6911 
6912 // Load Instructions
6913 
// Load Byte (8 bit signed)
// Selected only for plain loads; acquiring loads are matched by the
// ldar* rules in the volatile section further down.
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// ldrsb sign-extends all the way to 64 bits, folding away the ConvI2L.
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
// ldrb zero-extends, so the same encoding serves the ConvI2L form.
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6969 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
// ldrsh sign-extends to 64 bits, folding away the ConvI2L.
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
// ldrh zero-extends, so the same encoding serves the ConvI2L form.
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7025 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// ldrsw sign-extends to 64 bits, folding away the ConvI2L.
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// ldrw zero-extends into the 64-bit register, which implements the
// AndL with the 0xFFFFFFFF mask for free.
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7067 
// Load Long (64 bit signed)
// Plain (non-acquiring) load only; volatile loads use the ldar rule below.
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7081 
// Load Range
// Array length loads never need acquire semantics, so no predicate.
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
// barrier_data() == 0 restricts this rule to loads with no GC barrier
// attached -- NOTE(review): barrier-carrying loads appear to be matched
// elsewhere; confirm against the GC-specific ad files.
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// FP loads use the generic memory pipeline class, unlike the integer
// loads above which use iload_reg_mem.
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7178 
7179 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 insns: a general pointer may need a full mov/movk sequence
// or relocation -- cheaper than the single-insn NULL/one cases below.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7235 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7249 
// Load Byte Map Base Constant
// The card-table base is materialized with adr (PC-relative).

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
7305 
// Load Packed Float Constant
// "Packed" means the value is representable as an fmov immediate, so
// no constant-table load is required.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: fetch the value from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7349 
// Load Double Constant
// General case: fetch the value from the constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7366 
7367 // Store Instructions
7368 
// Store CMS card-mark Immediate
// Variant without the StoreStore barrier (the "storestore" in the
// format is noted as elided).
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// Selected only for plain stores; releasing stores are matched by the
// stlr* rules in the volatile section further down.
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7412 
7413 
// Store Byte Zero
// The zero is stored straight from the architectural zero register
// (see aarch64_enc_strb0), matching the sibling storeimm*0 rules.
instruct storeimmB0(immI0 zero, memory1 mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7426 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short Zero (stores the zero register directly)
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer Zero (stores the zero register directly)
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7481 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory8 mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7495 
// Store Long Zero (64 bit signed; stores the zero register directly)
instruct storeimmL0(immL0 zero, memory8 mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7509 
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer (stores the zero register directly)
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Null Pointer (stores the zero register directly)
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7564 
// Store Float
// FP stores use the generic memory pipeline class, unlike the integer
// stores above which use istore_reg_mem.
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7626 
7627 //  ---------------- volatile loads and stores ----------------
7628 
// Load Byte (8 bit signed)
// Acquiring (volatile) loads: matched when the plain-load rules'
// !needs_acquiring_load predicate fails. All use load-acquire (ldar*)
// encodings and the serializing pipeline class.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
7693 
// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7718 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Disassembly comment now matches the signed encoding used below.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7731 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends into the 64-bit register, implementing the AndL
// with the 0xFFFFFFFF mask for free.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7757 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7770 
// Load Pointer
// barrier_data() == 0: only loads with no GC barrier attached --
// NOTE(review): mirrors the predicate on the plain loadP rule above.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7823 
// Store Byte
// Releasing (volatile) stores: matched when the plain-store rules'
// !needs_releasing_store predicate fails. All use store-release (stlr*).
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7863 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7876 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7931 
7932 //  ---------------- end of volatile loads and stores ----------------
7933 
// Cache line write-back (used e.g. for persistent-memory flushes);
// only selected when the CPU advertises DC CVAP/CVADP support.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The matcher guarantees a base-register-only address (no index,
    // zero displacement), asserted here.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering barrier emitted after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
7974 
7975 // ============================================================================
7976 // BSWAP Instructions
7977 
// Reverse the byte order of a 32-bit value (Integer.reverseBytes).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a 64-bit value (Long.reverseBytes).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap the low 16 bits, zero-extended result (unsigned short).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap the low 16 bits, then sign-extend bits 0..15 to 32 bits
// (signed short): rev16w followed by sbfmw #0,#15.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8031 
8032 // ============================================================================
8033 // Zero Count Instructions
8034 
// Count leading zeros of a 32-bit value (Integer.numberOfLeadingZeros).
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit value (Long.numberOfLeadingZeros).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (32-bit): AArch64 has no ctz instruction, so
// bit-reverse then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (64-bit): rbit + clz, as above.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8086 
8087 //---------- Population Count Instructions -------------------------------------
8088 //
8089 
// Population count of a 32-bit value using the SIMD cnt/addv sequence:
// move to an FP/SIMD register, count bits per byte (cnt 8B), sum the
// bytes (addv), move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back into the input register (clears its
    // upper 32 bits in place) without a USE_KILL effect; presumably safe
    // because the high half of an int register is insignificant - confirm.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI with the operand loaded directly from memory: a 4-byte ldrs
// into the SIMD temp replaces the integer load + register move.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// 64-bit population count; same cnt/addv sequence over the full 1D lane.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountL with the operand loaded directly from memory (8-byte ldrd).
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8176 
8177 // ============================================================================
8178 // MemBar Instruction
8179 
// LoadFence: orders prior loads against subsequent loads and stores
// (LoadLoad|LoadStore barrier).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided acquire barrier: matched (at cost 0) when unnecessary_acquire(n)
// decides the barrier is already redundant - presumably because a
// preceding acquiring load provides the ordering. Emits only a comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier (LoadLoad|LoadStore).
// NOTE(review): the format text says "dmb ish" but the encoding requests
// LoadLoad|LoadStore, which may emit the load-only variant (dmb ishld);
// the format is debug output only - confirm against Assembler::membar.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Acquire barrier paired with lock entry: always elided (the lock
// acquisition itself provides the ordering); emits only a comment.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders prior loads and stores against subsequent stores
// (LoadStore|StoreStore barrier).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided release barrier: matched (at cost 0) when unnecessary_release(n)
// decides it is redundant; emits only a comment.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier (LoadStore|StoreStore).
// NOTE(review): as with membar_acquire, the "dmb ish" format text is
// debug-only and may not match the exact barrier variant emitted.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Release barrier paired with lock exit: always elided (the lock release
// itself provides the ordering); emits only a comment.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elided volatile (StoreLoad) barrier when unnecessary_volatile(n) holds.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier (StoreLoad). High cost (x100) discourages the
// matcher from selecting it when an elided or fused form is available.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8327 
8328 // ============================================================================
8329 // Cast/Convert Instructions
8330 
// Reinterpret a long as a pointer. A register move, elided entirely when
// source and destination were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; same elidable register move.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// Takes the low 32 bits of the pointer via a 32-bit move.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8373 
8374 // Convert compressed oop into int for vectors alignment masking
8375 // in case of 32bit oops (heap < 4Gb).
// Narrow oop -> int, valid only when compressed oops are unshifted
// (shift == 0), so the narrow oop already equals the low 32 address bits
// and a plain 32-bit register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fix: format previously printed the literal text "dst" instead of the
  // destination operand; use "$dst" like every other instruct here.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8389 
8390 
8391 // Convert oop pointer into compressed form
// Compress an oop that may be null; the null check inside
// encode_heap_oop is why the flags register is killed here.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null; no null check needed.
// NOTE(review): cr is declared as an operand but there is no KILL effect -
// presumably encode_heap_oop_not_null leaves flags intact; confirm.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the single-register overload is
// used when source and destination share a register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}

// Type-system-only cast: emits no code (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Type-system-only pointer cast: emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Type-system-only int cast: emits no code, costs nothing.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8513 
8514 // ============================================================================
8515 // Atomic operation instructions
8516 //
8517 // Intel and SPARC both implement Ideal Node LoadPLocked and
8518 // Store{PIL}Conditional instructions using a normal load for the
8519 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8520 //
8521 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8522 // pair to lock object allocations from Eden space when not using
8523 // TLABs.
8524 //
8525 // There does not appear to be a Load{IL}Locked Ideal Node and the
8526 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8527 // and to use StoreIConditional only for 32-bit and StoreLConditional
8528 // only for 64-bit.
8529 //
8530 // We implement LoadPLocked and StorePLocked instructions using,
8531 // respectively the AArch64 hw load-exclusive and store-conditional
8532 // instructions. Whereas we must implement each of
8533 // Store{IL}Conditional using a CAS which employs a pair of
8534 // instructions comprising a load-exclusive followed by a
8535 // store-conditional.
8536 
8537 
8538 // Locked-load (linked load) of the current heap-top
8539 // used when updating the eden heap top
8540 // implemented using ldaxr on AArch64
8541 
// Load-exclusive with acquire (ldaxr) of a pointer; pairs with
// storePConditional below for heap-top updates.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}

// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// Store-exclusive with release (stlxr) completing the ldaxr above.
// Only the flags result is architecturally produced; $oldval is carried
// by the ideal graph but not used by the encoding.
instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8619 
8620 // standard CompareAndSwapX when we are using barriers
8621 // these have higher priority than the rules selected by a predicate
8622 
8623 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8624 // can't match them
8625 
// CAS on a byte; cmpxchgb then cset materializes 1/0 from the EQ flag.
// Twice the volatile cost so the acquiring (Acq) forms win when legal.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a short (halfword).
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a 32-bit int.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a 64-bit long.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on an uncompressed oop. Restricted to barrier_data() == 0 so GCs
// that attach barrier data (e.g. via a barrier set) use their own rules.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a compressed (narrow) oop.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8734 
8735 // alternative CompareAndSwapX when we are eliding barriers
8736 
// Acquiring variants of the CAS rules above: matched only when
// needs_acquiring_load_exclusive(n) holds, letting the ldaxr in the
// acquiring cmpxchg replace a separate trailing acquire barrier.
// Lower cost (VOLATILE_REF_COST) makes these win over the plain forms.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a short.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a 32-bit int.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a 64-bit long.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on an uncompressed oop; also requires no GC barrier data.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS on a compressed (narrow) oop.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8850 
8851 
8852 // ---------------------------------------------------------------------
8853 
8854 
8855 // BEGIN This section of the file is automatically generated. Do not edit --------------
8856 
8857 // Sundry CAS operations.  Note that release is always true,
8858 // regardless of the memory ordering of the CAS.  This is because we
8859 // need the volatile case to be sequentially consistent but there is
8860 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8861 // can't check the type of memory ordering here, so we always emit a
8862 // STLXR.
8863 
8864 // This section is generated from aarch64_ad_cas.m4
8865 
8866 
8867 
// NOTE(review): this group lives in the "automatically generated - do not
// edit" section (from aarch64_ad_cas.m4); comments here will be lost on
// regeneration. Also, each format string says "weak" although the
// encoding passes /*weak*/ false - looks like leftover template text;
// the format is debug output only. Confirm against the m4 source.

// CompareAndExchange returns the value previously in memory (in $res),
// not a success flag; TEMP_DEF res keeps it distinct from the inputs.
// Byte variant: result sign-extended from 8 bits via sxtbw.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Short variant: result sign-extended from 16 bits via sxthw.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// 32-bit int variant.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// 64-bit long variant.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Narrow (compressed) oop variant.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Uncompressed oop variant; restricted to nodes with no GC barrier data.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8960 
// Acquiring variants of the strong CompareAndExchange rules, matched only
// when needs_acquiring_load_exclusive(n) holds; cmpxchg is invoked with
// /*acquire*/ true. Lower ins_cost (VOLATILE_REF_COST) makes these preferred
// over the relaxed rules when the predicate holds.
// Sub-word (byte/short) variants sign-extend the fetched value so $res holds
// a proper int, matching the relaxed forms above.
// Fixed: the format annotations previously said "weak" even though
// /*weak*/ false is passed below (these are strong CAS).
// NOTE(review): auto-generated section -- mirror this fix in the generator.

instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
9060 
// Weak CAS variants, relaxed memory order: a single LL/SC attempt
// (/*weak*/ true) that may fail spuriously. The fetched value is discarded
// (noreg); instead the flags set by cmpxchg are converted into a 0/1 success
// result with csetw. Sub-word forms need no sign-extension here since only
// the flags are consumed.
// NOTE(review): auto-generated section -- comments here will be lost on
// regeneration; keep any substantive notes in the generator source.

instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Pointer weak CAS only when no GC barrier is required (barrier_data() == 0).
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9163 
// Acquiring weak CAS variants: matched only when
// needs_acquiring_load_exclusive(n) holds; cmpxchg runs with
// /*acquire*/ true. The lower ins_cost (VOLATILE_REF_COST) makes these
// preferred over the relaxed rules above when the predicate holds.

instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9253 
// Acquiring weak pointer CAS; only when no GC barrier is required
// (barrier_data() == 0). Success is reported as 0/1 in $res via csetw.
// Consistency fix: predicate() now precedes match(), matching every sibling
// rule in this section.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9271 
9272 // END This section of the file is automatically generated. Do not edit --------------
9273 // ---------------------------------------------------------------------
9274 
// Atomic exchange (GetAndSet*), relaxed memory order: the previous value at
// [$mem] is returned in $prev and $newv is stored, via
// MacroAssembler::atomic_xchg(w).

instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Pointer exchange only when no GC barrier is required (barrier_data() == 0).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9315 
// Acquiring atomic exchange: matched when needs_acquiring_load_exclusive(n)
// holds, using atomic_xchgal(w) (acquire form). Lower ins_cost makes these
// preferred over the relaxed rules above when the predicate holds.

instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Pointer exchange only when no GC barrier is required (barrier_data() == 0).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9359 
9360 
// Atomic fetch-and-add (GetAndAdd*), relaxed memory order. Four variants per
// width: register vs. immediate increment (immLAddSub/immIAddSub constants),
// and value-producing vs. result-discarding (_no_res, matched via
// result_not_used(), passing noreg). The "+ 1" on the value-producing forms'
// ins_cost biases selection toward the cheaper _no_res forms when the result
// is unused.

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9444 
// Acquiring fetch-and-add variants: matched when
// needs_acquiring_load_exclusive(n) holds, using atomic_addal(w) (acquire
// form). Same register/immediate and _no_res structure as the relaxed family
// above; lower ins_cost makes these preferred when the predicate holds.

instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9532 
9533 // Manifest a CmpL result in an integer register.
9534 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // cmp sets the flags; csetw leaves dst = (src1 != src2) ? 1 : 0; cnegw
  // then negates dst when src1 < src2 (signed), yielding -1/0/+1 as the
  // CmpL3 contract requires.
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9555 
// As cmpL3_reg_reg, but the right operand is an add/sub-encodable immediate.
// Fixed: stray one-space mis-indent on the "if (con < 0)" line.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // subs only accepts non-negative immediates, so a negative constant is
    // compared by adding its negation instead. (immLAddSub presumably
    // restricts the constant to the AArch64 add/sub immediate range, so
    // negating con cannot overflow -- confirm against the operand
    // definition.)
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9580 
9581 // ============================================================================
9582 // Conditional Move Instructions
9583 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9593 
// Conditional move, int, both sources in registers. AArch64 csel returns its
// first source register when the condition holds, so $src2 (the second ideal
// input) is selected when $cmp is true and $src1 otherwise.

instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Identical emission, but typed on the unsigned compare op / flags register
// (see the note above on why cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9625 
9626 // special cases where one arg is zero
9627 
9628 // n.b. this is selected in preference to the rule above because it
9629 // avoids loading constant 0 into a source register
9630 
9631 // TODO
9632 // we ought only to be able to cull one of these variants as the ideal
9633 // transforms ought always to order the zero consistently (to left/right?)
9634 
// Conditional move where one ideal input is the constant zero: zr is used in
// the matching csel slot, avoiding a register to hold 0.

instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror-image forms: zero is the second ideal input, so zr occupies the
// "condition true" slot of csel.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9698 
9699 // special case for creating a boolean 0 or 1
9700 
9701 // n.b. this is selected in preference to the rule above because it
9702 // avoids loading constants 0 and 1 into a source register
9703 
// Boolean materialization: CMove between constants 1 and 0 is emitted as a
// single csincw zr, zr, cond -- dst = cond ? 0 : 0 + 1 -- needing no source
// registers at all (ins_pipe icond_none).

instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9741 
// Conditional move, long, signed flags. Note the emitted csel swaps the
// operand order relative to the match: dst = $cmp ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare variant of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// special cases where one arg is zero

// Long cmove where the "true" value is the constant zero: zr is selected
// when $cmp holds, otherwise $src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Long cmove where the "false" value is the constant zero: $src is selected
// when $cmp holds, otherwise zr.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9839 
// Conditional move, pointer (64-bit), signed flags: csel emits
// dst = $cmp ? $src2 : $src1 (operand order swapped relative to the match).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare variant of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Pointer cmove with constant null as the "true" value: zr selected when
// $cmp holds, otherwise $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Pointer cmove with constant null as the "false" value: $src selected when
// $cmp holds, otherwise zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9937 
// Conditional move, compressed (narrow) oop: uses the 32-bit cselw since a
// narrow oop occupies 32 bits; dst = $cmp ? $src2 : $src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9953 
// Conditional move, compressed (narrow) oop, unsigned-compare variant of
// cmovN_reg_reg: dst = $cmp ? $src2 : $src1 (32-bit cselw).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // Fix: this rule uses cmpOpU/rFlagsRegU, so the disassembly comment must
  // say "unsigned" (it previously said "signed").
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9969 
// special cases where one arg is zero

// Narrow-oop cmove with constant null as the "true" value: zr selected when
// $cmp holds, otherwise $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Narrow-oop cmove with constant null as the "false" value: $src selected
// when $cmp holds, otherwise zr.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare variant of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10035 
// Conditional move, float: fcsel picks $src2 when $cmp holds, else $src1
// (operand order swapped relative to the format's src1/src2 listing).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-compare variant of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10071 
// Conditional move, double: fcseld picks $src2 when $cmp holds, else $src1.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fix: disassembly comment previously said "cmove float" although this
  // rule handles CMoveD (double) and emits the D-form fcseld.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10089 
// Conditional move, double, unsigned-compare variant: fcseld picks $src2
// when $cmp holds, else $src1.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fix: disassembly comment previously said "cmove float" although this
  // rule handles CMoveD (double) and emits the D-form fcseld.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10107 
10108 // ============================================================================
10109 // Arithmetic Instructions
10110 //
10111 
10112 // Integer Addition
10113 
10114 // TODO
10115 // these currently employ operations which do not set CR and hence are
10116 // not flagged as killing CR but we would like to isolate the cases
10117 // where we want to set flags from those where we don't. need to work
10118 // out how to do that.
10119 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As addI_reg_imm but folding an L2I conversion of the register operand
// (the 32-bit addw reads only the low word, so the narrowing is free).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10162 
// Pointer Addition
// 64-bit pointer + long offset, register + register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + int offset: fold the ConvI2L into the add's sxtw extension.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + scaled long index: fold the left shift into the address mode
// via lea with an lsl-extended register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + scaled int index: fold both the I2L conversion (sxtw) and the
// shift into the address mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (LShiftL (ConvI2L src) scale) as a single sbfiz: place the (at most 32
// significant) source bits at bit position $scale, sign-extended.
// Width is capped at 32 via MIN since the source is a 32-bit value.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10238 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
// 64-bit add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10272 
// Long Immediate Addition. No constant pool entries required.
// 64-bit add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10287 
// Integer Subtraction
// 32-bit subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10335 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fix: missing space after the mnemonic ("sub$dst" -> "sub $dst") in the
  // disassembly format string.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10350 
// Integer Negation (special case for sub)

// 0 - src matched as a 32-bit negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 0 - src matched as a 64-bit neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10382 
// Integer Multiply

// 32-bit multiply, register * register.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64-bit signed multiply: folds the two I2L conversions into a
// single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply, register * register.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10431 
// High 64 bits of a signed 64x64 multiply (MulHiL), via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fix: removed stray trailing comma after $src2 in the format string.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10447 
10448 // Combined Integer Multiply & Add/Sub
10449 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fix: the encoding emits the 32-bit maddw, so print "maddw" (the format
  // previously said "madd", the 64-bit mnemonic).
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10465 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fix: the encoding emits the 32-bit msubw, so print "msubw" (the format
  // previously said "msub", the 64-bit mnemonic).
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10481 
10482 // Combined Integer Multiply & Neg
10483 
// Fused 32-bit multiply-negate: dst = -(src1 * src2); matches a negated
// operand on either side of the multiply.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Fix: the encoding emits the 32-bit mnegw, so print "mnegw" (the format
  // previously said "mneg", the 64-bit mnemonic).
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10499 
// Combined Long Multiply & Add/Sub

// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// Fused 64-bit multiply-negate: dst = -(src1 * src2); matches a negated
// operand on either side of the multiply.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10551 
// Combine Integer Signed Multiply & Add/Sub/Neg Long

// Fused 32x32 -> 64-bit multiply-add: dst = src3 + sext(src1) * sext(src2),
// folding both I2L conversions into the smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Fused 32x32 -> 64-bit multiply-subtract: dst = src3 - sext(src1) * sext(src2).
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Fused 32x32 -> 64-bit multiply-negate: dst = -(sext(src1) * sext(src2));
// matches a negated operand on either side.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10601 
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)

// Two-instruction sequence: the first product goes through rscratch1, then
// maddw folds in the second product.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}

  ins_pipe(imac_reg_reg);
%}
10617 
// Integer Divide

// 32-bit signed divide via the shared aarch64_enc_divw encoding.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Long Divide

// 64-bit signed divide via the shared aarch64_enc_div encoding.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10641 
10642 // Integer Remainder
10643 
// 32-bit signed remainder: sdivw then msubw (dst = src1 - q * src2), via the
// shared aarch64_enc_modw encoding.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fix: second format line was malformed ("msubw($dst, ..." with a stray,
  // unbalanced parenthesis).
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10654 
10655 // Long Remainder
10656 
// 64-bit signed remainder: sdiv then msub (dst = src1 - q * src2), via the
// shared aarch64_enc_mod encoding.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fix: second format line was malformed ("msub($dst, ..." with a stray,
  // unbalanced parenthesis).
  format %{ "sdiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10667 
10668 // Integer Shifts
10669 
10670 // Shift Left Register
10671 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10672   match(Set dst (LShiftI src1 src2));
10673 
10674   ins_cost(INSN_COST * 2);
10675   format %{ "lslvw  $dst, $src1, $src2" %}
10676 
10677   ins_encode %{
10678     __ lslvw(as_Register($dst$$reg),
10679              as_Register($src1$$reg),
10680              as_Register($src2$$reg));
10681   %}
10682 
10683   ins_pipe(ialu_reg_reg_vshift);
10684 %}
10685 
10686 // Shift Left Immediate
10687 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10688   match(Set dst (LShiftI src1 src2));
10689 
10690   ins_cost(INSN_COST);
10691   format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
10692 
10693   ins_encode %{
10694     __ lslw(as_Register($dst$$reg),
10695             as_Register($src1$$reg),
10696             $src2$$constant & 0x1f);
10697   %}
10698 
10699   ins_pipe(ialu_reg_shift);
10700 %}
10701 
10702 // Shift Right Logical Register
10703 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10704   match(Set dst (URShiftI src1 src2));
10705 
10706   ins_cost(INSN_COST * 2);
10707   format %{ "lsrvw  $dst, $src1, $src2" %}
10708 
10709   ins_encode %{
10710     __ lsrvw(as_Register($dst$$reg),
10711              as_Register($src1$$reg),
10712              as_Register($src2$$reg));
10713   %}
10714 
10715   ins_pipe(ialu_reg_reg_vshift);
10716 %}
10717 
10718 // Shift Right Logical Immediate
// 32-bit unsigned (logical) shift right by a constant; the immediate is
// masked to its low 5 bits ($src2 & 0x1f).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10733 
10734 // Shift Right Arithmetic Register
// 32-bit arithmetic (sign-extending) shift right, shift amount in a
// register (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10749 
10750 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by a constant; the immediate is masked
// to its low 5 bits ($src2 & 0x1f).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10765 
10766 // Combined Int Mask and Right Shift (using UBFM)
10767 // TODO
10768 
10769 // Long Shifts
10770 
10771 // Shift Left Register
// 64-bit shift left, shift amount in a register (lslv).  The shift
// amount is an int-typed value (iRegIorL2I), as for all long shifts.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10786 
10787 // Shift Left Immediate
// 64-bit shift left by a constant; the immediate is masked to its low
// 6 bits ($src2 & 0x3f), matching Java long shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10802 
10803 // Shift Right Logical Register
// 64-bit unsigned (logical) shift right, shift amount in a register (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10818 
10819 // Shift Right Logical Immediate
// 64-bit unsigned (logical) shift right by a constant; the immediate is
// masked to its low 6 bits ($src2 & 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10834 
10835 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long
// (URShiftL of CastP2X).  CastP2X needs no code of its own here; the
// pointer register is used directly as the shift input.  The comment
// above notes this exists for card table stores (address >> shift).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10850 
10851 // Shift Right Arithmetic Register
// 64-bit arithmetic (sign-extending) shift right, shift amount in a
// register (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10866 
10867 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by a constant; the immediate is masked
// to its low 6 bits ($src2 & 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10882 
10883 // BEGIN This section of the file is automatically generated. Do not edit --------------
10884 // This section is generated from aarch64_ad.m4
10885 
// Bitwise NOT: (XorL/XorI src1 -1) is matched to a single eon/eonw of
// src1 with zr (EOR with the inverted zero register, i.e. all-ones).
// NOTE(review): `cr` is declared in these generated instructs but not
// referenced by the encoding; presumably an artifact of the m4
// templates — confirm against aarch64_ad.m4 before changing.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit variant of the bitwise-NOT match above.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10918 
// Fused logical-op-with-NOT patterns (generated; edit aarch64_ad.m4):
//   And src1 (Xor src2 -1)  ->  bic/bicw  (AND NOT)
//   Or  src1 (Xor src2 -1)  ->  orn/ornw  (OR  NOT)
//   Xor -1 (Xor src2 src1)  ->  eon/eonw  (EOR NOT)
// Each folds the NOT (xor with all-ones) into one instruction.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Note the operand order in the Xor matches below: the -1 is the outer
// operand, i.e. NOT(src2 ^ src1), which equals src1 EON src2.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11020 
// AND with a shifted-then-inverted operand (generated; edit
// aarch64_ad.m4): And src1 (Xor (shift src2 src3) -1) is matched to a
// single bic/bicw with a shifted-register second operand.  Shift
// constants are masked to 5 bits (int) or 6 bits (long).
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11128 
// XOR with a shifted-then-inverted operand (generated; edit
// aarch64_ad.m4): Xor -1 (Xor (shift src2 src3) src1), i.e.
// NOT(src1 ^ (src2 shift src3)), is matched to a single eon/eonw with
// a shifted-register second operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11236 
// OR with a shifted-then-inverted operand (generated; edit
// aarch64_ad.m4): Or src1 (Xor (shift src2 src3) -1) is matched to a
// single orn/ornw with a shifted-register second operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11344 
// AND with a shifted-register operand (generated; edit aarch64_ad.m4):
// And src1 (shift src2 src3) folds the shift into the andw/andr
// shifted-register form.  (`andr` is the MacroAssembler name for the
// 64-bit AND — presumably because `and` clashes; confirm in
// macroAssembler_aarch64.)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11458 
// XOR with a shifted-register operand (generated; edit aarch64_ad.m4):
// Xor src1 (shift src2 src3) folds the shift into the eorw/eor
// shifted-register form.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11572 
// OR with a shifted-register operand (generated; edit aarch64_ad.m4):
// Or src1 (shift src2 src3) folds the shift into the orrw/orr
// shifted-register form.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11686 
11687 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11688                          iRegIorL2I src1, iRegIorL2I src2,
11689                          immI src3, rFlagsReg cr) %{
11690   match(Set dst (AddI src1 (URShiftI src2 src3)));
11691 
11692   ins_cost(1.9 * INSN_COST);
11693   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11694 
11695   ins_encode %{
11696     __ addw(as_Register($dst$$reg),
11697               as_Register($src1$$reg),
11698               as_Register($src2$$reg),
11699               Assembler::LSR,
11700               $src3$$constant & 0x1f);
11701   %}
11702 
11703   ins_pipe(ialu_reg_reg_shift);
11704 %}
11705 
11706 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11707                          iRegL src1, iRegL src2,
11708                          immI src3, rFlagsReg cr) %{
11709   match(Set dst (AddL src1 (URShiftL src2 src3)));
11710 
11711   ins_cost(1.9 * INSN_COST);
11712   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11713 
11714   ins_encode %{
11715     __ add(as_Register($dst$$reg),
11716               as_Register($src1$$reg),
11717               as_Register($src2$$reg),
11718               Assembler::LSR,
11719               $src3$$constant & 0x3f);
11720   %}
11721 
11722   ins_pipe(ialu_reg_reg_shift);
11723 %}
11724 
11725 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11726                          iRegIorL2I src1, iRegIorL2I src2,
11727                          immI src3, rFlagsReg cr) %{
11728   match(Set dst (AddI src1 (RShiftI src2 src3)));
11729 
11730   ins_cost(1.9 * INSN_COST);
11731   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11732 
11733   ins_encode %{
11734     __ addw(as_Register($dst$$reg),
11735               as_Register($src1$$reg),
11736               as_Register($src2$$reg),
11737               Assembler::ASR,
11738               $src3$$constant & 0x1f);
11739   %}
11740 
11741   ins_pipe(ialu_reg_reg_shift);
11742 %}
11743 
11744 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
11745                          iRegL src1, iRegL src2,
11746                          immI src3, rFlagsReg cr) %{
11747   match(Set dst (AddL src1 (RShiftL src2 src3)));
11748 
11749   ins_cost(1.9 * INSN_COST);
11750   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
11751 
11752   ins_encode %{
11753     __ add(as_Register($dst$$reg),
11754               as_Register($src1$$reg),
11755               as_Register($src2$$reg),
11756               Assembler::ASR,
11757               $src3$$constant & 0x3f);
11758   %}
11759 
11760   ins_pipe(ialu_reg_reg_shift);
11761 %}
11762 
11763 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11764                          iRegIorL2I src1, iRegIorL2I src2,
11765                          immI src3, rFlagsReg cr) %{
11766   match(Set dst (AddI src1 (LShiftI src2 src3)));
11767 
11768   ins_cost(1.9 * INSN_COST);
11769   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11770 
11771   ins_encode %{
11772     __ addw(as_Register($dst$$reg),
11773               as_Register($src1$$reg),
11774               as_Register($src2$$reg),
11775               Assembler::LSL,
11776               $src3$$constant & 0x1f);
11777   %}
11778 
11779   ins_pipe(ialu_reg_reg_shift);
11780 %}
11781 
11782 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11783                          iRegL src1, iRegL src2,
11784                          immI src3, rFlagsReg cr) %{
11785   match(Set dst (AddL src1 (LShiftL src2 src3)));
11786 
11787   ins_cost(1.9 * INSN_COST);
11788   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11789 
11790   ins_encode %{
11791     __ add(as_Register($dst$$reg),
11792               as_Register($src1$$reg),
11793               as_Register($src2$$reg),
11794               Assembler::LSL,
11795               $src3$$constant & 0x3f);
11796   %}
11797 
11798   ins_pipe(ialu_reg_reg_shift);
11799 %}
11800 
11801 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11802                          iRegIorL2I src1, iRegIorL2I src2,
11803                          immI src3, rFlagsReg cr) %{
11804   match(Set dst (SubI src1 (URShiftI src2 src3)));
11805 
11806   ins_cost(1.9 * INSN_COST);
11807   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11808 
11809   ins_encode %{
11810     __ subw(as_Register($dst$$reg),
11811               as_Register($src1$$reg),
11812               as_Register($src2$$reg),
11813               Assembler::LSR,
11814               $src3$$constant & 0x1f);
11815   %}
11816 
11817   ins_pipe(ialu_reg_reg_shift);
11818 %}
11819 
11820 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11821                          iRegL src1, iRegL src2,
11822                          immI src3, rFlagsReg cr) %{
11823   match(Set dst (SubL src1 (URShiftL src2 src3)));
11824 
11825   ins_cost(1.9 * INSN_COST);
11826   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11827 
11828   ins_encode %{
11829     __ sub(as_Register($dst$$reg),
11830               as_Register($src1$$reg),
11831               as_Register($src2$$reg),
11832               Assembler::LSR,
11833               $src3$$constant & 0x3f);
11834   %}
11835 
11836   ins_pipe(ialu_reg_reg_shift);
11837 %}
11838 
11839 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11840                          iRegIorL2I src1, iRegIorL2I src2,
11841                          immI src3, rFlagsReg cr) %{
11842   match(Set dst (SubI src1 (RShiftI src2 src3)));
11843 
11844   ins_cost(1.9 * INSN_COST);
11845   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11846 
11847   ins_encode %{
11848     __ subw(as_Register($dst$$reg),
11849               as_Register($src1$$reg),
11850               as_Register($src2$$reg),
11851               Assembler::ASR,
11852               $src3$$constant & 0x1f);
11853   %}
11854 
11855   ins_pipe(ialu_reg_reg_shift);
11856 %}
11857 
11858 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11859                          iRegL src1, iRegL src2,
11860                          immI src3, rFlagsReg cr) %{
11861   match(Set dst (SubL src1 (RShiftL src2 src3)));
11862 
11863   ins_cost(1.9 * INSN_COST);
11864   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11865 
11866   ins_encode %{
11867     __ sub(as_Register($dst$$reg),
11868               as_Register($src1$$reg),
11869               as_Register($src2$$reg),
11870               Assembler::ASR,
11871               $src3$$constant & 0x3f);
11872   %}
11873 
11874   ins_pipe(ialu_reg_reg_shift);
11875 %}
11876 
11877 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11878                          iRegIorL2I src1, iRegIorL2I src2,
11879                          immI src3, rFlagsReg cr) %{
11880   match(Set dst (SubI src1 (LShiftI src2 src3)));
11881 
11882   ins_cost(1.9 * INSN_COST);
11883   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11884 
11885   ins_encode %{
11886     __ subw(as_Register($dst$$reg),
11887               as_Register($src1$$reg),
11888               as_Register($src2$$reg),
11889               Assembler::LSL,
11890               $src3$$constant & 0x1f);
11891   %}
11892 
11893   ins_pipe(ialu_reg_reg_shift);
11894 %}
11895 
11896 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11897                          iRegL src1, iRegL src2,
11898                          immI src3, rFlagsReg cr) %{
11899   match(Set dst (SubL src1 (LShiftL src2 src3)));
11900 
11901   ins_cost(1.9 * INSN_COST);
11902   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11903 
11904   ins_encode %{
11905     __ sub(as_Register($dst$$reg),
11906               as_Register($src1$$reg),
11907               as_Register($src2$$reg),
11908               Assembler::LSL,
11909               $src3$$constant & 0x3f);
11910   %}
11911 
11912   ins_pipe(ialu_reg_reg_shift);
11913 %}
11914 
11915 
11916 
// ---- Shift-left followed by shift-right: combined bitfield move -----------
// (src << lshift) >> rshift collapses to a single SBFM/UBFM.  With
//   r = (rshift - lshift) & (width-1)   and   s = (width-1) - lshift
// the bitfield move reproduces the two-shift result: signed (SBFM) when the
// second shift is arithmetic, unsigned (UBFM) when it is logical.  The
// compiler emits this idiom for the i2b/i2s/i2c bytecodes etc.

// Signed 64-bit: dst = (src << lshift_count) >> rshift_count (arithmetic).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts obey Java semantics: masked to 0..63.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Signed 32-bit: dst = (src << lshift_count) >> rshift_count (arithmetic).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts obey Java semantics: masked to 0..31.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Unsigned 64-bit: dst = (src << lshift_count) >>> rshift_count (logical).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Shift counts obey Java semantics: masked to 0..63.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Unsigned 32-bit: dst = (src << lshift_count) >>> rshift_count (logical).
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // Shift counts obey Java semantics: masked to 0..31.
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// ---- Bitfield extract with shift & mask -----------------------------------
// (src >>> rshift) & mask becomes a single UBFX when mask is a contiguous
// low-bit mask 2^width - 1 (guaranteed by immI_bitmask / immL_bitmask).
// The predicate checks rshift + width does not exceed the register width,
// which is the limit of what UBFX can encode.

// 32-bit: extract `width` bits of src starting at bit `rshift`, zero-extend.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit: extract `width` bits of src starting at bit `rshift`, zero-extend.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12032 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, which implements the
// ConvI2L for free (the masked int value is non-negative).
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12052 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift becomes a single UBFIZ placing a `width`-bit field
// at bit position `lshift`; the predicate keeps lshift + width within the
// 32-bit register so the instruction can encode it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of the rule above; lshift + width must fit in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12089 
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
// ((long)(src & mask)) << lshift: the masked value is non-negative (the mask
// is an immI_bitmask), so the 64-bit ubfiz's zero-extension implements the
// ConvI2L for free.  The 64-bit shift count allows lshift up to 63.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12107 
// Rotations
// (src1 << lshift) | (src2 >>> rshift) combined with Or or Add becomes a
// single EXTR (extract from register pair) when the predicate holds: the
// two shift counts sum to 0 mod the register width (i.e. lshift + rshift is
// the register width, or both are zero), which is exactly a funnel shift.
// With src1 == src2 this is a rotate.  Add works as well as Or here because
// the shifted operands select disjoint bit ranges, so no carries occur.
// NOTE(review): cr is listed as an operand but is not used by match/effect.

// 64-bit Or-based funnel shift: extr dst, src1, src2, rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit Or-based funnel shift.
// NOTE(review): the format prints "extr" but the 32-bit extrw is emitted.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 64-bit Add-based funnel shift (equivalent to the Or form; see note above).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit Add-based funnel shift.
// NOTE(review): the format prints "extr" but the 32-bit extrw is emitted.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12169 
12170 
// rol expander
// AArch64 has no rol-by-register instruction, so rotate-left is done as
// rorv by the negated count: rol(x, s) == ror(x, -s mod 64).
// Clobbers rscratch1 (holds the negated shift count).
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant: rol(x, s) == ror(x, -s mod 32).  Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// (src << s) | (src >>> (64 - s)) is rotate-left by s.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// (src << s) | (src >>> (0 - s)): 0 - s is congruent to 64 - s mod 64
// (shift counts are masked), so this is the same rotate-left.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// (src << s) | (src >>> (32 - s)) is rotate-left by s (32-bit).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// (src << s) | (src >>> (0 - s)): same rotate-left, 0 - s == 32 - s mod 32.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// ror expander
// Rotate-right by register maps directly onto rorv: single instruction.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant of the direct rorv mapping.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// (src >>> s) | (src << (64 - s)) is rotate-right by s.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// (src >>> s) | (src << (0 - s)): same rotate-right, 0 - s == 64 - s mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// (src >>> s) | (src << (32 - s)) is rotate-right by s (32-bit).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// (src >>> s) | (src << (0 - s)): same rotate-right, 0 - s == 32 - s mod 32.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12304 
// Add/subtract (extended)
// ConvI2L of the second operand folds into the extended-register form of
// add/sub (sxtw), so the widening costs nothing extra.

// dst = src1 + (long)src2, via add ... sxtw.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// dst = src1 - (long)src2, via sub ... sxtw.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12332 
12333 
// ---- Add with an explicitly sign/zero-extended operand --------------------
// (src2 << k) >> k with matched constant k is sign-extension of the low
// (width - k) bits (zero-extension for the logical URShift forms); it folds
// into the extended-register form of add: sxtb/sxth/sxtw or uxtb.

// dst = src1 + (short)src2: (src2 << 16) >> 16 is sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (byte)src2: (src2 << 24) >> 24 is sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xFF): (src2 << 24) >>> 24 is uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + sxth(src2): (src2 << 48) >> 48.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + sxtw(src2): (src2 << 32) >> 32.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + sxtb(src2): (src2 << 56) >> 56.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + uxtb(src2): (src2 << 56) >>> 56.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12424 
12425 
// ---- Add/subtract with a zero-extension expressed as an AND mask ----------
// src2 & 0xFF / 0xFFFF / 0xFFFFFFFF is zero-extension of a byte / halfword /
// word; it folds into the extended-register form (uxtb/uxth/uxtw) of
// add/sub, saving the explicit AND.

// dst = src1 + (src2 & 0xFF), via addw ... uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xFFFF), via addw ... uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xFF), via add ... uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xFFFF), via add ... uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 + (src2 & 0xFFFFFFFF), via add ... uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xFF), via subw ... uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xFFFF), via subw ... uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xFF), via sub ... uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xFFFF), via sub ... uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 - (src2 & 0xFFFFFFFF), via sub ... uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12555 
12556 
// ---- Add with a sign-extended operand that is then left-shifted -----------
// ((src2 << k) >> k) << lshift2, with k selecting a byte/halfword/word
// sign-extension, folds into add's extended-register form with a shift
// amount: add dst, src1, src2, <ext> #lshift2.  lshift2 is constrained by
// immIExt -- presumably to the small shift range the extend field encodes;
// confirm against the immIExt operand definition.

// dst = src1 + (sxtb(src2) << lshift2).
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxth(src2) << lshift2).
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxtw(src2) << lshift2).
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12595 
12596 instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12597 %{
12598   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12599   ins_cost(1.9 * INSN_COST);
12600   format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}
12601 
12602    ins_encode %{
12603      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12604             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12605    %}
12606   ins_pipe(ialu_reg_reg_shift);
12607 %}
12608 
12609 instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12610 %{
12611   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12612   ins_cost(1.9 * INSN_COST);
12613   format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}
12614 
12615    ins_encode %{
12616      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12617             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12618    %}
12619   ins_pipe(ialu_reg_reg_shift);
12620 %}
12621 
12622 instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12623 %{
12624   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12625   ins_cost(1.9 * INSN_COST);
12626   format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}
12627 
12628    ins_encode %{
12629      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12630             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12631    %}
12632   ins_pipe(ialu_reg_reg_shift);
12633 %}
12634 
// 32-bit counterparts of the rules above: the shift pair uses k = 24/16
// (byte/half within a 32-bit value) and the fused instruction is the
// w-form addw/subw with an sxtb/sxth-extended, shifted register operand.

// addw dst, src1, src2, sxtb #lshift2
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// addw dst, src1, src2, sxth #lshift2
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// subw dst, src1, src2, sxtb #lshift2
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// subw dst, src1, src2, sxth #lshift2
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12686 
12687 
// (ConvI2L src2) << lshift folded into add/sub with sxtw-extended, shifted
// register operand: the int-to-long sign extension is free.
// NOTE(review): these two rules close with "%};" (trailing semicolon) unlike
// their neighbours -- ADLC tolerates it, and this is generated code, so it
// is left untouched here.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// sub dst, src1, (long)src2 << lshift, using sxtw-extended operand
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12713 
12714 
// Zero-extend (via mask) then shift, folded into one add/sub: the subtree
// (LShiftL (AndL src2 mask) lshift) with mask = 0xff/0xffff/0xffffffff maps
// to the uxtb/uxth/uxtw-extended register operand with shift amount lshift.

// add dst, src1, src2, uxtb #lshift
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// add dst, src1, src2, uxth #lshift
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// add dst, src1, src2, uxtw #lshift
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// sub dst, src1, src2, uxtb #lshift
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// sub dst, src1, src2, uxth #lshift
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// sub dst, src1, src2, uxtw #lshift
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12792 
// 32-bit variants of the masked-and-shifted fusions above, emitting
// addw/subw with a uxtb/uxth-extended, shifted register operand.

// addw dst, src1, src2, uxtb #lshift
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// addw dst, src1, src2, uxth #lshift
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// subw dst, src1, src2, uxtb #lshift
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// subw dst, src1, src2, uxth #lshift
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12844 // END This section of the file is automatically generated. Do not edit --------------
12845 
12846 // ============================================================================
12847 // Floating Point Arithmetic Instructions
12848 
// Scalar FP add/sub/mul, single (s) and double (d) precision. Each rule
// maps one ideal node to one AArch64 scalar FP instruction; costs reflect
// typical FP latency relative to INSN_COST.

// float + float -> fadds
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double + double -> faddd
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float - float -> fsubs
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double - double -> fsubd
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float * float -> fmuls (slightly higher cost than add/sub)
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double * double -> fmuld
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12938 
// Fused multiply-add rules, guarded by UseFMA (only selected when the VM
// permits fused rounding semantics for Math.fma intrinsics).

// src1 * src2 + src3 -> fmadds (single fused operation, one rounding)
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3 -> fmaddd (double precision)
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3 -> fmsubs; the two match rules cover the negation
// appearing on either multiplicand, which is algebraically equivalent.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3 -> fmsubd (double precision)
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13008 
// Negated fused multiply-add/sub rules (UseFMA only).

// -src1 * src2 - src3 -> fnmadds
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3 -> fnmaddd (double precision)
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3 -> fnmsubs
// NOTE(review): the `zero` operand is never referenced by the encoding;
// presumably a leftover from an earlier match rule -- confirm before removal.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3 (double precision)
// NOTE(review): unused `zero` operand here as well; see mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd; the assembler exposes the double
  // variant under the name fnmsub, hence the apparent mismatch here.
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13079 
13080 
// Math.min/max intrinsics: fmaxs/fmins/fmaxd/fmind follow IEEE 754-2008
// minNum/maxNum semantics as implemented by the hardware instruction.

// Math.max(FF)F -> fmaxs
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F -> fmins
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D -> fmaxd
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D -> fmind
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13136 
13137 
// FP division: high, precision-dependent costs (double divide is the
// slowest scalar FP operation modelled here).

// float / float -> fdivs
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// double / double -> fdivd
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13167 
// Float negate -> fnegs.
// Fix: the format string previously printed "fneg", but the encoding emits
// the single-precision fnegs (and the double rule below prints "fnegd");
// the mnemonic is corrected so PrintOptoAssembly output matches the
// generated instruction. Encoding and matching are unchanged.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13181 
// double negate -> fnegd
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Math.abs(float) -> fabss
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Math.abs(double) -> fabsd
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13221 
// Math.sqrt(double) -> fsqrtd.
// Fix: this rule previously used ins_pipe(fp_div_s) while the float rule
// used fp_div_d -- the two pipeline classes were swapped. The pipeline is
// a scheduling hint only (no correctness impact), but the double sqrt
// should be modelled on the double divide/sqrt pipeline, fp_div_d.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13234 
// Math.sqrt(float) -> fsqrts.
// Fix: this rule previously used ins_pipe(fp_div_d); the single-precision
// sqrt belongs on the single-precision divide/sqrt pipeline, fp_div_s
// (the pipeline classes were swapped with sqrtD_reg). Scheduling hint
// only; encoding and matching are unchanged.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13247 
// Math.rint, floor, ceil -> frintnd / frintmd / frintpd, selected at
// code-emission time from the constant rounding-mode operand.
// NOTE(review): the switch has no default arm; rmode is expected to be one
// of the three RoundDoubleModeNode constants -- an unexpected value would
// silently emit nothing. Confirm upstream guarantees before adding a guard.
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        // round to nearest, ties to even
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        // round toward minus infinity
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        // round toward plus infinity
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
    }
  %}
  ins_pipe(fp_uop_d);
%}
13270 
13271 // ============================================================================
13272 // Logical Instructions
13273 
13274 // Integer Logical Instructions
13275 
13276 // And Instructions
13277 
13278 
// int AND (register form) -> andw
// NOTE(review): cr is not referenced by the encoding; presumably retained
// for symmetry with flag-setting variants -- confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13293 
// int AND with a logical-immediate operand -> andw.
// Fix: the format string previously said "andsw" (the flag-setting form),
// but the encoding emits plain andw, which does not set flags; the
// mnemonic is corrected so debug disassembly matches the emitted code.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13308 
13309 // Or Instructions
13310 
// int OR (register) -> orrw
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int OR with logical immediate -> orrw
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int XOR (register) -> eorw
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int XOR with logical immediate -> eorw
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13372 
13373 // Long Logical Instructions
13374 // TODO
13375 
// 64-bit (long) logical operations: and/orr/eor, register and
// logical-immediate forms.
// Fix: all six format strings previously carried the comment "# int",
// copied from the 32-bit rules above; these operate on longs, so the
// disassembly annotation is corrected to "# long". Matching, encoding and
// costs are unchanged.

// long AND (register) -> and
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long AND with logical immediate -> and
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// long OR (register) -> orr
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long OR with logical immediate -> orr
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// long XOR (register) -> eor
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long XOR with logical immediate -> eor
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13469 
// Convert int to long by sign-extension (sbfm with imms 0..31 == sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Unsigned int to long: (ConvI2L src) masked with 0xFFFFFFFF is a
// zero-extension, so emit ubfm instead of sxtw.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Long to int: a 32-bit register move keeps the low word and zeroes the
// upper half of the destination.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13508 
// Conv2B on an int: dst = (src != 0) ? 1 : 0. Clobbers flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Conv2B on a pointer: dst = (src != NULL) ? 1 : 0. Clobbers flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13544 
// Floating point conversions.

// double -> float (narrowing).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double (widening).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int (fcvtzs: signed conversion, rounding toward zero).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long (signed, rounding toward zero).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float (scvtf: signed integer to floating point).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int (signed, rounding toward zero).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long (signed, rounding toward zero).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13674 
// stack <-> reg and reg <-> reg shuffles with no conversion
// (raw bit moves that reinterpret FP register bits as integer bits and
// vice versa; the stack variants go via a spill slot)

// Reinterpret a spilled float as an int: load the 32-bit stack slot.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled int as a float: load the slot into an FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a spilled double as a long: load the 64-bit stack slot.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled long as a double: load the slot into an FP register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a float register to an int stack slot (bit reinterpretation).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register to a float stack slot (bit reinterpretation).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13784 
// Store a double register to a long stack slot (bit reinterpretation).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format previously printed "$dst, $src", the wrong operand order
  // for a store; the encoding (strd src -> [sp, dst.disp]) was correct and
  // now matches the sibling MoveF2I_reg_stack / MoveL2D_reg_stack formats.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13802 
// Store a long register to a double stack slot (bit reinterpretation).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Direct register-to-register bit moves (fmov between FP and GP files).

// float bits -> int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13892 
13893 // ============================================================================
13894 // clearing of an array
13895 
13896 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13897 %{
13898   match(Set dummy (ClearArray cnt base));
13899   effect(USE_KILL cnt, USE_KILL base);
13900 
13901   ins_cost(4 * INSN_COST);
13902   format %{ "ClearArray $cnt, $base" %}
13903 
13904   ins_encode %{
13905     __ zero_words($base$$Register, $cnt$$Register);
13906   %}
13907 
13908   ins_pipe(pipe_class_memory);
13909 %}
13910 
13911 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13912 %{
13913   predicate((u_int64_t)n->in(2)->get_long()
13914             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13915   match(Set dummy (ClearArray cnt base));
13916   effect(USE_KILL base);
13917 
13918   ins_cost(4 * INSN_COST);
13919   format %{ "ClearArray $cnt, $base" %}
13920 
13921   ins_encode %{
13922     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13923   %}
13924 
13925   ins_pipe(pipe_class_memory);
13926 %}
13927 
13928 // ============================================================================
13929 // Overflow Math Instructions
13930 
13931 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13932 %{
13933   match(Set cr (OverflowAddI op1 op2));
13934 
13935   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13936   ins_cost(INSN_COST);
13937   ins_encode %{
13938     __ cmnw($op1$$Register, $op2$$Register);
13939   %}
13940 
13941   ins_pipe(icmp_reg_reg);
13942 %}
13943 
13944 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13945 %{
13946   match(Set cr (OverflowAddI op1 op2));
13947 
13948   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13949   ins_cost(INSN_COST);
13950   ins_encode %{
13951     __ cmnw($op1$$Register, $op2$$constant);
13952   %}
13953 
13954   ins_pipe(icmp_reg_imm);
13955 %}
13956 
13957 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13958 %{
13959   match(Set cr (OverflowAddL op1 op2));
13960 
13961   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13962   ins_cost(INSN_COST);
13963   ins_encode %{
13964     __ cmn($op1$$Register, $op2$$Register);
13965   %}
13966 
13967   ins_pipe(icmp_reg_reg);
13968 %}
13969 
13970 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13971 %{
13972   match(Set cr (OverflowAddL op1 op2));
13973 
13974   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13975   ins_cost(INSN_COST);
13976   ins_encode %{
13977     __ cmn($op1$$Register, $op2$$constant);
13978   %}
13979 
13980   ins_pipe(icmp_reg_imm);
13981 %}
13982 
13983 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13984 %{
13985   match(Set cr (OverflowSubI op1 op2));
13986 
13987   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13988   ins_cost(INSN_COST);
13989   ins_encode %{
13990     __ cmpw($op1$$Register, $op2$$Register);
13991   %}
13992 
13993   ins_pipe(icmp_reg_reg);
13994 %}
13995 
13996 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13997 %{
13998   match(Set cr (OverflowSubI op1 op2));
13999 
14000   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
14001   ins_cost(INSN_COST);
14002   ins_encode %{
14003     __ cmpw($op1$$Register, $op2$$constant);
14004   %}
14005 
14006   ins_pipe(icmp_reg_imm);
14007 %}
14008 
14009 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14010 %{
14011   match(Set cr (OverflowSubL op1 op2));
14012 
14013   format %{ "cmp   $op1, $op2\t# overflow check long" %}
14014   ins_cost(INSN_COST);
14015   ins_encode %{
14016     __ cmp($op1$$Register, $op2$$Register);
14017   %}
14018 
14019   ins_pipe(icmp_reg_reg);
14020 %}
14021 
14022 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14023 %{
14024   match(Set cr (OverflowSubL op1 op2));
14025 
14026   format %{ "cmp   $op1, $op2\t# overflow check long" %}
14027   ins_cost(INSN_COST);
14028   ins_encode %{
14029     __ subs(zr, $op1$$Register, $op2$$constant);
14030   %}
14031 
14032   ins_pipe(icmp_reg_imm);
14033 %}
14034 
14035 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
14036 %{
14037   match(Set cr (OverflowSubI zero op1));
14038 
14039   format %{ "cmpw  zr, $op1\t# overflow check int" %}
14040   ins_cost(INSN_COST);
14041   ins_encode %{
14042     __ cmpw(zr, $op1$$Register);
14043   %}
14044 
14045   ins_pipe(icmp_reg_imm);
14046 %}
14047 
14048 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
14049 %{
14050   match(Set cr (OverflowSubL zero op1));
14051 
14052   format %{ "cmp   zr, $op1\t# overflow check long" %}
14053   ins_cost(INSN_COST);
14054   ins_encode %{
14055     __ cmp(zr, $op1$$Register);
14056   %}
14057 
14058   ins_pipe(icmp_reg_imm);
14059 %}
14060 
// Multiply-overflow check: compute the widened product, detect whether it
// fits the narrower type, then synthesize a V flag for the generic VS/VC
// consumers (a compare of 0x80000000 against 1 sets V and nothing else
// useful, giving VS on overflow).
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for an If consuming the overflow test directly: branch on the
// NE/EQ result of the widened-product comparison instead of materializing V.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow: compare the high 64 bits of the 128-bit product
// against the sign-extension of the low 64 bits.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused branch form of the long multiply overflow check.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14150 
14151 // ============================================================================
14152 // Compare Instructions
14153 
14154 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14155 %{
14156   match(Set cr (CmpI op1 op2));
14157 
14158   effect(DEF cr, USE op1, USE op2);
14159 
14160   ins_cost(INSN_COST);
14161   format %{ "cmpw  $op1, $op2" %}
14162 
14163   ins_encode(aarch64_enc_cmpw(op1, op2));
14164 
14165   ins_pipe(icmp_reg_reg);
14166 %}
14167 
14168 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14169 %{
14170   match(Set cr (CmpI op1 zero));
14171 
14172   effect(DEF cr, USE op1);
14173 
14174   ins_cost(INSN_COST);
14175   format %{ "cmpw $op1, 0" %}
14176 
14177   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14178 
14179   ins_pipe(icmp_reg_imm);
14180 %}
14181 
14182 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14183 %{
14184   match(Set cr (CmpI op1 op2));
14185 
14186   effect(DEF cr, USE op1);
14187 
14188   ins_cost(INSN_COST);
14189   format %{ "cmpw  $op1, $op2" %}
14190 
14191   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14192 
14193   ins_pipe(icmp_reg_imm);
14194 %}
14195 
14196 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14197 %{
14198   match(Set cr (CmpI op1 op2));
14199 
14200   effect(DEF cr, USE op1);
14201 
14202   ins_cost(INSN_COST * 2);
14203   format %{ "cmpw  $op1, $op2" %}
14204 
14205   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14206 
14207   ins_pipe(icmp_reg_imm);
14208 %}
14209 
14210 // Unsigned compare Instructions; really, same as signed compare
14211 // except it should only be used to feed an If or a CMovI which takes a
14212 // cmpOpU.
14213 
14214 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
14215 %{
14216   match(Set cr (CmpU op1 op2));
14217 
14218   effect(DEF cr, USE op1, USE op2);
14219 
14220   ins_cost(INSN_COST);
14221   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14222 
14223   ins_encode(aarch64_enc_cmpw(op1, op2));
14224 
14225   ins_pipe(icmp_reg_reg);
14226 %}
14227 
14228 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
14229 %{
14230   match(Set cr (CmpU op1 zero));
14231 
14232   effect(DEF cr, USE op1);
14233 
14234   ins_cost(INSN_COST);
14235   format %{ "cmpw $op1, #0\t# unsigned" %}
14236 
14237   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14238 
14239   ins_pipe(icmp_reg_imm);
14240 %}
14241 
14242 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
14243 %{
14244   match(Set cr (CmpU op1 op2));
14245 
14246   effect(DEF cr, USE op1);
14247 
14248   ins_cost(INSN_COST);
14249   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14250 
14251   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14252 
14253   ins_pipe(icmp_reg_imm);
14254 %}
14255 
14256 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
14257 %{
14258   match(Set cr (CmpU op1 op2));
14259 
14260   effect(DEF cr, USE op1);
14261 
14262   ins_cost(INSN_COST * 2);
14263   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14264 
14265   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14266 
14267   ins_pipe(icmp_reg_imm);
14268 %}
14269 
// Long compares: same structure as the int variants above but using the
// 64-bit cmp encodings.

instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compares: identical encodings, but the unsigned flags
// register type steers consumers to the unsigned condition codes.

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14381 
// Pointer and compressed-pointer compares (always unsigned).

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null checks: compare a pointer against zero.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14437 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Compare against the floating-point literal zero (dedicated fcmp form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision variants of the FP comparisons above.
14471 
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Compare against the floating-point literal zero (dedicated fcmp form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14499 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, 1 if greater.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses
  // ("csinvw($dst, ... eq" with no closing paren).
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Removed an unused "Label done" and its bind: nothing ever branched
    // to it and binding emits no code.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14527 
// Three-way double compare (CmpD3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, 1 if greater.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Removed an unused "Label done" and its bind (dead code, no branches).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14554 
// Three-way float compare against 0.0: dst = -1 / 0 / 1 as above.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Removed an unused "Label done" and its bind (dead code, no branches).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14581 
// Three-way double compare against 0.0: dst = -1 / 0 / 1 as above.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format previously had unbalanced parentheses.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Removed an unused "Label done" and its bind (dead code, no branches).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14607 
// CmpLTMask: dst = (p < q) ? -1 : 0, built as cset (0/1) then negate.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift replicates the sign bit,
// giving -1 for negative src and 0 otherwise in a single instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14644 
14645 // ============================================================================
14646 // Max and Min
14647 
// Conditional select on LT: dst = (flags say lt) ? src1 : src2.
// No match rule -- reachable only through expand blocks (see minI_rReg).
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(icond_reg_reg);
%}
14664 
// Signed integer minimum: expands into a compare followed by the
// conditional select above (dst = src1 if src1 < src2 else src2).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}

%}
14677 // FROM HERE
14678 
// Conditional select on GT: dst = (flags say gt) ? src1 : src2.
// No match rule -- reachable only through expand blocks (see maxI_rReg).
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  effect( DEF dst, USE src1, USE src2, USE cr );

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(icond_reg_reg);
%}
14695 
// Signed integer maximum: compare, then select the greater operand
// (dst = src1 if src1 > src2 else src2).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
14706 
14707 // ============================================================================
14708 // Branch Instructions
14709 
14710 // Direct Branch.
// Unconditional PC-relative branch for the Goto ideal node; the
// encoding class resolves the label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14724 
14725 // Conditional Near Branch
// Conditional branch using signed condition codes taken from $cmp;
// the unsigned variant is branchConU below.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14745 
14746 // Conditional Near Branch Unsigned
// Conditional branch using unsigned condition codes (cmpOpU /
// rFlagsRegU); otherwise identical in structure to branchCon above.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14766 
14767 // Make use of CBZ and CBNZ.  These instructions, as well as being
14768 // shorter than (cmp; branch), have the additional benefit of not
14769 // killing the flags.
14770 
// Fuse "compare int with 0 and branch" into a single cbzw/cbnzw.
// Restricted to eq/ne (cmpOpEqNe) because cbz/cbnz can only test zero.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14787 
// 64-bit variant of the fused zero-compare-and-branch: cbz/cbnz on a
// long register; eq/ne only.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14804 
// Pointer-vs-NULL test fused into a 64-bit cbz/cbnz; eq/ne only.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14821 
// Narrow (compressed) oop vs zero fused into a 32-bit cbzw/cbnzw;
// eq/ne only.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14838 
// NULL test of a decoded narrow oop: the decoded pointer is NULL iff
// the 32-bit compressed value is zero, so test the narrow register
// directly with cbzw/cbnzw and skip the DecodeN.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14855 
// Unsigned compare of an int with 0 fused into cbzw/cbnzw.  Against a
// zero operand, unsigned "<=" (LS) is equivalent to "== 0", so both EQ
// and LS select the branch-if-zero form; everything else branches on
// non-zero.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14872 
// 64-bit variant of the unsigned zero-compare-and-branch above:
// EQ/LS (== 0 when the right operand is zero) -> cbz, otherwise cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14889 
14890 // Test bit and Branch
14891 
14892 // Patterns for short (< 32KiB) variants
// Branch on the sign of a long compared with zero (lt/ge only): test
// bit 63.  lt (negative) means the sign bit is set -> NE; ge -> EQ.
// Marked as a short branch: used only when the target is in range.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14908 
// Branch on the sign of an int compared with zero (lt/ge only): test
// bit 31.  lt -> NE (sign bit set), ge -> EQ.  Short-branch variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14924 
// Branch on a single bit of a long: (op1 & op2) == 0 / != 0 where the
// predicate guarantees op2 is a power of two, i.e. exactly one bit.
// Short-branch variant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // bit index of the single set bit in the mask
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14941 
// Branch on a single bit of an int; mask must be a power of two
// (see predicate).  Short-branch variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // bit index of the single set bit in the mask
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14958 
14959 // And far variants
// Far variant of cmpL_branch_sign: same bit-63 sign test, but the
// /*far*/true argument requests a long-range branch form for targets
// beyond the short-branch limit.  No ins_short_branch marker.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14974 
// Far variant of cmpI_branch_sign: bit-31 sign test with a long-range
// branch form (/*far*/true).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14989 
// Far variant of cmpL_branch_bit: single-bit test of a long with a
// long-range branch form (/*far*/true).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15005 
// Far variant of cmpI_branch_bit: single-bit test of an int with a
// long-range branch form (/*far*/true).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15021 
15022 // Test bits
15023 
// Set flags from (op1 & imm) compared with zero via a 64-bit tst.
// Applicable only when the mask encodes as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15036 
// Set flags from (op1 & imm) compared with zero via a 32-bit tstw.
// Applicable only when the mask encodes as a 32-bit logical immediate.
// Fix: the format string previously said "tst" while the encoding
// emits tstw; now consistent with cmpI_and_reg below (debug output
// only, no behavioral change).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15049 
// Register-register form: set flags from (op1 & op2) via 64-bit tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15060 
// Register-register form: set flags from (op1 & op2) via 32-bit tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15071 
15072 
15073 // Conditional Far Branch
15074 // Conditional Far Branch Unsigned
15075 // TODO: fixme
15076 
15077 // counted loop end branch near
// Conditional back-branch for the CountedLoopEnd node (signed
// condition codes); shares its encoding class with branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15093 
15094 // counted loop end branch near Unsigned
// Unsigned-condition variant of branchLoopEnd (cmpOpU / rFlagsRegU).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15110 
15111 // counted loop end branch far
15112 // counted loop end branch far unsigned
15113 // TODO: fixme
15114 
15115 // ============================================================================
15116 // inlined locking and unlocking
15117 
// Inlined monitor enter (FastLock): the encoding class emits the fast
// lock path; the outcome is communicated through the flags (cr is the
// Set result).  tmp/tmp2 are scratch registers clobbered by the path.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15132 
// Inlined monitor exit (FastUnlock); mirror of cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15145 
15146 
15147 // ============================================================================
15148 // Safepoint Instructions
15149 
15150 // TODO
15151 // provide a near and far version of this code
15152 
// Safepoint poll: a load from the polling page with a poll_type
// relocation.  NOTE(review): the result is discarded (ldrw zr per the
// format); the runtime is expected to arm the page to trap polling
// threads -- standard HotSpot mechanism, confirm against the VM side.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15166 
15167 
15168 // ============================================================================
15169 // Procedure Call/Return Instructions
15170 
15171 // Call Java Static Instruction
15172 
// Direct call to a statically-bound Java method; the call epilog
// encoding runs after the call proper.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15188 
15189 // TO HERE
15190 
15191 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (e.g. through an inline cache);
// pairs the dynamic-call encoding with the shared call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15207 
15208 // Call Runtime Instruction
15209 
// Call from compiled Java code into the runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15224 
15225 // Call Runtime Instruction
15226 
// Leaf runtime call (no safepoint/oop-map bookkeeping at the ideal
// level); same runtime-call encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15241 
15242 // Call Runtime Instruction
15243 
// Leaf runtime call that does not use floating point (CallLeafNoFP);
// encoding is shared with the other runtime-call instructs.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15258 
15259 // Tail Call; Jump from runtime stub to Java code.
15260 // Also known as an 'interprocedural jump'.
15261 // Target of jump will eventually return to caller.
15262 // TailJump below removes the return address.
// Indirect interprocedural jump from a runtime stub into Java code;
// method_oop rides along in the inline-cache register (see format).
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15275 
// Indirect jump carrying an exception oop in r0 (TailJump removes the
// return address -- see the comment block above TailCalljmpInd).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15288 
15289 // Create exception oop: created by stack-crawling runtime code.
15290 // Created exception is now available to this handler, and is setup
15291 // just prior to jumping to this handler. No code emitted.
15292 // TODO check
15293 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size placeholder: the exception oop is already in r0 when
// control reaches the handler, so no instructions are emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15306 
15307 // Rethrow exception: The exception oop will come in the first
15308 // argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  // Jump (not call) to the rethrow stub; the exception oop is in the
  // first argument register per the comment above.
  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15319 
15320 
15321 // Return Instruction
15322 // epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  // lr already holds the return address (loaded by the epilog node,
  // per the comment above); the encoding class emits the ret.
  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15333 
15334 // Die now.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // Emit nothing if this Halt node was proven unreachable.
    if (is_reachable()) {
      // Trap with a recognizable payload (0xdead + 1).
      // NOTE(review): "dpcs1" looks like a transposition of the AArch64
      // DCPS1 mnemonic -- confirm against the name declared in this
      // project's Assembler before renaming.
      __ dpcs1(0xdead + 1);
    }
  %}

  ins_pipe(pipe_class_default);
%}
15349 
15350 // ============================================================================
15351 // Partial Subtype Check
15352 //
15353 // superklass array for an instance of the superklass.  Set a hidden
15354 // internal cache on a hit (cache is checked with exposed code in
15355 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15356 // encoding ALSO sets flags.
15357 
// Partial subtype check (see the section comment above): result is
// zero on a hit, non-zero on a miss; flags are also set.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15372 
// Variant matched when the check's result is only compared against
// zero: produce flags (cr) instead of a value, so the result register
// need not be zeroed on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15387 
// String compare intrinsic, UTF-16 x UTF-16 (UU) encoding.  No vector
// temporaries needed (fnoreg placeholders); both counts and both
// string pointers are consumed (USE_KILL).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15405 
// String compare intrinsic, Latin-1 x Latin-1 (LL) encoding; structure
// mirrors string_compareU above.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15422 
// Mixed-encoding string compare (UTF-16 vs Latin-1, UL): unlike the
// UU/LL forms this needs three vector temporaries (v0-v2).
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15442 
// Mixed-encoding string compare (Latin-1 vs UTF-16, LU); mirror of
// string_compareUL with the operands' encodings swapped.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15462 
// String.indexOf intrinsic, both strings UTF-16 (UU), variable needle
// length.  icnt2 == -1 tells string_indexof the count is in cnt2
// rather than a compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15483 
// String.indexOf intrinsic, both strings Latin-1 (LL), variable
// needle length (icnt2 == -1; count taken from cnt2 at runtime).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15504 
// String.indexOf intrinsic, mixed encodings (UL), variable needle
// length (icnt2 == -1; count taken from cnt2 at runtime).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15525 
// Constant-needle-length variant of string_indexofUU: the needle
// length (<= 4 chars, see immI_le_4) is baked in as icnt2, so fewer
// temps are needed and the runtime count registers are passed as zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15546 
// String.indexOf intrinsic, LL encoding, constant pattern length <= 4.
// Mirrors string_indexof_conUU apart from the encoding argument.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr fills the register slots unused on the constant-length path.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15567 
// String.indexOf intrinsic, UL encoding, constant pattern length.  Note the
// operand is immI_1 (exactly 1), tighter than the immI_le_4 used by the UU
// and LL constant variants — longer UL patterns fall back to the
// register-count rule string_indexofUL above.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15588 
// StringUTF16.indexOf(char) intrinsic: find a single char value in a
// char[] region of length cnt1.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15606 
// String.equals intrinsic for LL (Latin-1) encoded strings; the final
// argument to string_equals is the element size in bytes (1 here).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15622 
// String.equals intrinsic for UU (UTF-16) encoded strings; element size 2.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15638 
// Arrays.equals intrinsic for byte[] (LL encoding); the trailing 1 is the
// element size in bytes.  R10 (tmp) is clobbered outright by the stub.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
15655 
// Arrays.equals intrinsic for char[] (UU encoding); element size 2 bytes.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15672 
// StringCoding.hasNegatives intrinsic: report whether any byte in
// ary1[0..len) has its sign bit set.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15683 
// fast char[] to byte[] compression
// (StrCompressedCopy): copy len chars from src into dst, narrowing each to
// one byte, using SIMD registers V0-V3 as scratch; result reports the
// outcome to the caller.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15702 
// fast byte[] to char[] inflation
// (StrInflatedCopy): widen len bytes from src into chars at dst.  The rule
// produces no value (Universe dummy); it exists purely for its side effect.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15717 
// encode char[] to byte[] in ISO_8859_1
// Unlike string_compress above, the SIMD scratch registers are declared
// KILL rather than TEMP here.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15736 
15737 // ============================================================================
15738 // This name is KNOWN by the ADLC and cannot be changed.
15739 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15740 // for this guy.
15741 instruct tlsLoadP(thread_RegP dst)
15742 %{
15743   match(Set dst (ThreadLocal));
15744 
15745   ins_cost(0);
15746 
15747   format %{ " -- \t// $dst=Thread::current(), empty" %}
15748 
15749   size(0);
15750 
15751   ins_encode( /*empty*/ );
15752 
15753   ins_pipe(pipe_class_empty);
15754 %}
15755 
15756 // ====================VECTOR INSTRUCTIONS=====================================
15757 
// Load vector (32 bits)
// Vector loads: the memory_size predicate selects the rule matching the
// vector's byte width (4/8/16), which in turn picks ldrs/ldrd/ldrq.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15790 
// Store Vector (32 bits)
// Vector stores, mirroring the loadV* rules above: width-selecting
// predicate picks strs/strd/strq.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15823 
// Splat a GP register byte into every lane of a 64-bit vector.  The
// length-4 case is also matched here (it fits in the low half of a vecD).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 128-bit (16-lane) variant of the byte splat above.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15848 
// Splat an immediate byte constant; the constant is masked to its low
// 8 bits before encoding into the movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 128-bit variant of the immediate byte splat.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15873 
// Splat a GP register short (16-bit lane, T4H/T8H arrangement); length 2
// shares the 64-bit rule.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 128-bit (8-lane) variant of the short splat.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15898 
// Splat an immediate short constant (masked to 16 bits).
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 128-bit variant of the immediate short splat.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15923 
// Splat a GP register int across 2 (T2S) or 4 (T4S) 32-bit lanes.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 128-bit (4-lane) variant of the int splat.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15947 
// Splat an immediate int constant (full 32-bit value, no masking needed).
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 128-bit variant of the immediate int splat.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15971 
// Splat a GP register long across both 64-bit lanes of a vecX.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero the whole 128-bit register by XOR-ing it with itself — no source
// register or immediate load needed for a zero splat.
// NOTE(review): this rule is named replicate2L_zero with a length-2
// predicate, yet it matches (ReplicateI zero) and its format string says
// "vector(4I)".  The emitted eor zeroes all 128 bits either way, but the
// match/name mismatch looks inconsistent — confirm intent against upstream
// before relying on it.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15997 
// Splat a float already held in an FP register across 2 or 4 lanes.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// 128-bit (4-lane) variant of the float splat.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
16023 
// Splat a double from an FP register across both 64-bit lanes.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16036 
16037 // ====================REDUCTION ARITHMETIC====================================
16038 
// 2-lane int add reduction: extract both 32-bit lanes with umov, add the
// incoming scalar accumulator isrc, and produce the scalar sum.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "umov  $tmp2, $vsrc, S, 1\n\t"
            "addw  $tmp, $isrc, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t# add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ addw($tmp$$Register, $isrc$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16058 
// 4-lane int add reduction: addv sums all four lanes in the vector unit,
// then a single umov + addw folds in the scalar accumulator.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp);
  format %{ "addv  $vtmp, T4S, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "addw  $dst, $itmp, $isrc\t# add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($vtmp$$reg), __ T4S,
            as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ addw($dst$$Register, $itmp$$Register, $isrc$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16077 
// 2-lane int multiply reduction: dst = lane1 * (lane0 * isrc), computed
// with scalar muls; dst doubles as an accumulator, hence TEMP dst.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "mul   $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t# mul reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16097 
// 4-lane int multiply reduction: first halve the work in the vector unit
// (ins copies the high 64 bits of vsrc down, mulv multiplies the two
// halves pairwise), then finish with two scalar muls as in reduce_mul2I.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP vtmp, TEMP itmp, TEMP dst);
  format %{ "ins   $vtmp, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T2S, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, S, 0\n\t"
            "mul   $dst, $itmp, $isrc\n\t"
            "umov  $itmp, $vtmp, S, 1\n\t"
            "mul   $dst, $itmp, $dst\t# mul reduction4I"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T2S,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 0);
    __ mul($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ S, 1);
    __ mul($dst$$Register, $itmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16123 
// 2-lane float add reduction.  Lanes are accumulated strictly in order
// (fsrc + lane0, then + lane1) with scalar fadds, preserving the
// sequential FP semantics the IR requires; ins moves lane 1 down to lane 0
// of the temp so the scalar add can reach it.
instruct reduce_add2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction2F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16143 
// 4-lane float add reduction: same in-order scalar accumulation as
// reduce_add2F, extended to lanes 1..3 via repeated ins/fadds pairs.
instruct reduce_add4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t# add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16175 
// 2-lane float multiply reduction: in-order scalar fmuls over fsrc and
// each lane, matching the structure of reduce_add2F.
instruct reduce_mul2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16195 
// 4-lane float multiply reduction: in-order scalar fmuls over fsrc and
// lanes 0..3, mirroring reduce_add4F.
instruct reduce_mul4F(vRegF dst, vRegF fsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVF fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $vsrc, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t# mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16227 
// 2-lane double add reduction: in-order scalar faddd over dsrc and both
// 64-bit lanes (ins brings lane 1 down for the second add).
instruct reduce_add2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t# add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16247 
// 2-lane double multiply reduction, mirroring reduce_add2D with fmuld.
instruct reduce_mul2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (MulReductionVD dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t# mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16267 
// 2-lane float max reduction: scalar fmaxs against lane 0, then against
// lane 1 (moved down by ins).  TEMP_DEF dst keeps dst distinct from the
// inputs since it is written before vsrc is fully consumed.
instruct reduce_max2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t# max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16283 
// 4-lane float max reduction: fmaxv reduces all four lanes across the
// vector, then a single scalar fmaxs folds in fsrc — no temp needed.
instruct reduce_max4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $vsrc\n\t"
            "fmaxs $dst, $dst, $fsrc\t# max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16297 
// 2-lane double max reduction (fmaxv has no 2D form, so both lanes are
// handled with scalar fmaxd, lane 1 extracted via ins).
instruct reduce_max2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t# max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16313 
// 2-lane float min reduction, mirroring reduce_max2F with fmins.
instruct reduce_min2F(vRegF dst, vRegF fsrc, vecD vsrc, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $fsrc, $vsrc\n\t"
            "ins   $tmp, S, $vsrc, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t# min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16329 
// Min-reduction of 4 floats (128-bit vector) combined with a scalar input:
// fminv folds the four S lanes, then scalar fmins merges fsrc.
instruct reduce_min4F(vRegF dst, vRegF fsrc, vecX vsrc) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV fsrc vsrc));
  ins_cost(INSN_COST);
  // dst is written before all inputs are dead, so it must not alias fsrc/vsrc.
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $vsrc\n\t"
            "fmins $dst, $dst, $fsrc\t# min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($fsrc$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16343 
// Min-reduction of 2 doubles combined with a scalar input. No across-vector
// fminv exists for D lanes, so fold lane 0, extract lane 1 into tmp, fold it.
instruct reduce_min2D(vRegD dst, vRegD dsrc, vecX vsrc, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV dsrc vsrc));
  ins_cost(INSN_COST);
  // dst is written early; tmp holds the extracted high lane.
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $dsrc, $vsrc\n\t"
            "ins   $tmp, D, $vsrc, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t# min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dsrc$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($vsrc$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16359 
16360 // ====================VECTOR ARITHMETIC=======================================
16361 
16362 // --------------------------------- ADD --------------------------------------
16363 
// Integer vector add, byte lanes, 64-bit vector (covers 4B and 8B vectors).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector add, byte lanes, 128-bit vector.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector add, short lanes, 64-bit vector (covers 2S and 4S vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector add, short lanes, 128-bit vector.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector add, int lanes, 64-bit vector.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector add, int lanes, 128-bit vector.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector add, long lanes, 128-bit vector.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// FP vector add, float lanes, 64-bit vector.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// FP vector add, float lanes, 128-bit vector.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16491 
// FP vector add, double lanes, 128-bit vector.
// Fix: the length predicate was missing here while every sibling 2D rule
// (vsub2D, vmul2D, vdiv2D) has it; restored so matching stays restricted
// to 2-element double vectors, consistent with the rest of the file.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16504 
16505 // --------------------------------- SUB --------------------------------------
16506 
// Integer vector subtract, byte lanes, 64-bit vector (covers 4B and 8B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector subtract, byte lanes, 128-bit vector.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector subtract, short lanes, 64-bit vector (covers 2S and 4S).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector subtract, short lanes, 128-bit vector.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector subtract, int lanes, 64-bit vector.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector subtract, int lanes, 128-bit vector.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector subtract, long lanes, 128-bit vector.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// FP vector subtract, float lanes, 64-bit vector.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// FP vector subtract, float lanes, 128-bit vector.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// FP vector subtract, double lanes, 128-bit vector.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16648 
16649 // --------------------------------- MUL --------------------------------------
16650 
// Integer vector multiply, byte lanes, 64-bit vector (covers 4B and 8B).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, byte lanes, 128-bit vector.
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Integer vector multiply, short lanes, 64-bit vector (covers 2S and 4S).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, short lanes, 128-bit vector.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Integer vector multiply, int lanes, 64-bit vector.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, int lanes, 128-bit vector.
// (No MulVL rule here: NEON has no 64x64-bit lane multiply.)
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// FP vector multiply, float lanes, 64-bit vector.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// FP vector multiply, float lanes, 128-bit vector.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// FP vector multiply, double lanes, 128-bit vector.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16778 
16779 // --------------------------------- MLA --------------------------------------
16780 
// Integer multiply-accumulate (dst += src1 * src2), short lanes, 64-bit
// vector. Matches the fused AddVS-of-MulVS tree; dst is read and written.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, short lanes, 128-bit vector.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, int lanes, 64-bit vector.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, int lanes, 128-bit vector.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// FP fused multiply-add (FmaVF), float lanes, 64-bit vector; UseFMA-gated.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
// FP fused multiply-add, float lanes, 128-bit vector; UseFMA-gated.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
// FP fused multiply-add, double lanes, 128-bit vector; UseFMA-gated.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16879 
16880 // --------------------------------- MLS --------------------------------------
16881 
// Integer multiply-subtract (dst -= src1 * src2), short lanes, 64-bit
// vector. Matches the fused SubVS-of-MulVS tree; dst is read and written.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, short lanes, 128-bit vector.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, int lanes, 64-bit vector.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, int lanes, 128-bit vector.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// FP fused multiply-subtract: either operand negated under FmaVF maps to
// fmls. Float lanes, 64-bit vector; UseFMA-gated.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// FP fused multiply-subtract, float lanes, 128-bit vector; UseFMA-gated.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// FP fused multiply-subtract, double lanes, 128-bit vector; UseFMA-gated.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16983 
16984 // --------------- Vector Multiply-Add Shorts into Integer --------------------
16985 
// Multiply adjacent pairs of shorts and accumulate into ints (MulAddVS2VI):
// widening smull of the low 4H pairs into tmp, widening smull2-style of the
// high half into dst, then addp folds adjacent pairs into 4 int lanes.
instruct vmuladdS2I(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  predicate(n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulAddVS2VI src1 src2));
  ins_cost(INSN_COST);
  // dst is written before the inputs are dead; tmp holds the low-half product.
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "smullv  $tmp, $src1, $src2\t# vector (4H)\n\t"
            "smullv  $dst, $src1, $src2\t# vector (8H)\n\t"
            "addpv   $dst, $tmp, $dst\t# vector (4S)\n\t" %}
  ins_encode %{
    __ smullv(as_FloatRegister($tmp$$reg), __ T4H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ smullv(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
    __ addpv(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($tmp$$reg),
             as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17007 
17008 // --------------------------------- DIV --------------------------------------
17009 
// FP vector divide, float lanes, 64-bit vector.
// (Integer vector division has no NEON instruction, hence FP only.)
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// FP vector divide, float lanes, 128-bit vector.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// FP vector divide, double lanes, 128-bit vector.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17051 
17052 // --------------------------------- SQRT -------------------------------------
17053 
// FP vector square root, float lanes, 64-bit vector.
instruct vsqrt2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector square root, float lanes, 128-bit vector.
instruct vsqrt4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "fsqrt  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}

// FP vector square root, double lanes, 128-bit vector.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17087 
17088 // --------------------------------- ABS --------------------------------------
17089 
// Integer vector absolute value, byte lanes, 64-bit vector (covers 4B/8B).
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Integer vector absolute value, byte lanes, 128-bit vector.
instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AbsVB src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Integer vector absolute value, short lanes, 64-bit vector.
// NOTE(review): predicate covers only length 4, whereas vadd4S/vsub4S/vmul4S
// also accept length 2 — confirm whether a 2-element AbsVS can occur here.
instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Integer vector absolute value, short lanes, 128-bit vector.
instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AbsVS src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Integer vector absolute value, int lanes, 64-bit vector.
instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Integer vector absolute value, int lanes, 128-bit vector.
instruct vabs4I(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVI src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Integer vector absolute value, long lanes, 128-bit vector.
instruct vabs2L(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// FP vector absolute value, float lanes, 64-bit vector.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector absolute value, float lanes, 128-bit vector.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP vector absolute value, double lanes, 128-bit vector.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17210 
17211 // --------------------------------- NEG --------------------------------------
17212 
// Floating-point vector negate: NEON FNEG on 2S lanes (64-bit vector).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Floating-point vector negate: NEON FNEG on 4S lanes (128-bit vector).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Double-precision vector negate: NEON FNEG on 2D lanes.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17251 
17252 // --------------------------------- AND --------------------------------------
17253 
// Bitwise AND of vectors up to 64 bits. The 4-byte case is also matched
// here and operated on as 8B; the upper unused lanes are don't-care.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND of 128-bit vectors (lane type is irrelevant for logicals).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17282 
17283 // --------------------------------- OR ---------------------------------------
17284 
17285 instruct vor8B(vecD dst, vecD src1, vecD src2)
17286 %{
17287   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17288             n->as_Vector()->length_in_bytes() == 8);
17289   match(Set dst (OrV src1 src2));
17290   ins_cost(INSN_COST);
17291   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
17292   ins_encode %{
17293     __ orr(as_FloatRegister($dst$$reg), __ T8B,
17294             as_FloatRegister($src1$$reg),
17295             as_FloatRegister($src2$$reg));
17296   %}
17297   ins_pipe(vlogical64);
17298 %}
17299 
// Bitwise OR of 128-bit vectors (lane type is irrelevant for logicals).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17313 
17314 // --------------------------------- XOR --------------------------------------
17315 
// Bitwise XOR of vectors up to 64 bits (NEON EOR). The 4-byte case is
// also matched here and operated on as 8B; upper lanes are don't-care.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR of 128-bit vectors (NEON EOR).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17344 
17345 // ------------------------------ Shift ---------------------------------------
// Broadcast a scalar shift count into every byte lane of a 64-bit vector
// (DUP). Used as the per-lane shift operand for SSHL/USHL below.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Broadcast a scalar shift count into every byte lane of a 128-bit vector.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
17368 
// Variable left shift of byte lanes, 64-bit vector: SSHL with a
// positive per-lane shift count performs a left shift.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left shift of byte lanes, 128-bit vector.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17395 
17396 // Right shifts with vector shift count on aarch64 SIMD are implemented
17397 // as left shift by negative shift count.
17398 // There are two cases for vector shift count.
17399 //
17400 // Case 1: The vector shift count is from replication.
17401 //        |            |
17402 //    LoadVector  RShiftCntV
17403 //        |       /
17404 //     RShiftVI
17405 // Note: In inner loop, multiple neg instructions are used, which can be
17406 // moved to outer loop and merge into one neg instruction.
17407 //
17408 // Case 2: The vector shift count is from loading.
17409 // This case isn't supported by middle-end now. But it's supported by
17410 // panama/vectorIntrinsics(JEP 338: Vector API).
17411 //        |            |
17412 //    LoadVector  LoadVector
17413 //        |       /
17414 //     RShiftVI
17415 //
17416 
// Variable arithmetic right shift of byte lanes, 64-bit vector.
// Implemented as SSHL by the negated count (see the comment block above).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable arithmetic right shift of byte lanes, 128-bit vector.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of byte lanes, 64-bit vector:
// USHL by the negated count.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right shift of byte lanes, 128-bit vector.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17486 
// Immediate left shift of byte lanes, 64-bit vector. Java masks the
// shift count to 0..31, which can meet or exceed the 8-bit lane width;
// a shift >= 8 must yield zero, so clear dst with EOR in that case.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift of byte lanes, 128-bit vector (same clamping).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of byte lanes, 64-bit vector.
// A shift >= 8 is clamped to 7: arithmetic shift saturates at
// "all sign bits", matching Java semantics for over-wide shifts.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift of byte lanes, 128-bit vector.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift of byte lanes, 64-bit vector.
// Shift >= 8 yields zero (cleared via EOR).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift of byte lanes, 128-bit vector.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17593 
// Variable left shift of short lanes, 64-bit vector (SSHL, 4H).
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left shift of short lanes, 128-bit vector (SSHL, 8H).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable arithmetic right shift of short lanes, 64-bit vector.
// negr works on byte lanes (T8B): SSHL reads only the low byte of each
// lane as its shift count, so byte-wise negation is sufficient.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable arithmetic right shift of short lanes, 128-bit vector.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of short lanes, 64-bit vector.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right shift of short lanes, 128-bit vector.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17690 
// Immediate left shift of short lanes, 64-bit vector. Java masks the
// count to 0..31, which can meet or exceed the 16-bit lane width; a
// shift >= 16 must yield zero, so clear dst with EOR in that case.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift of short lanes, 128-bit vector (same clamping).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of short lanes, 64-bit vector.
// Shift >= 16 is clamped to 15 (arithmetic shift saturates at sign).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift of short lanes, 128-bit vector.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift of short lanes, 64-bit vector.
// Shift >= 16 yields zero (cleared via EOR).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift of short lanes, 128-bit vector.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17797 
// Variable left shift of int lanes, 64-bit vector (SSHL, 2S).
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left shift of int lanes, 128-bit vector (SSHL, 4S).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable arithmetic right shift of int lanes, 64-bit vector:
// SSHL by the negated count (SSHL uses only the low byte of each lane,
// so byte-wise negr on T8B is sufficient).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable arithmetic right shift of int lanes, 128-bit vector.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of int lanes, 64-bit vector.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right shift of int lanes, 128-bit vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17891 
// Immediate shifts of int lanes. No clamping is needed here: Java masks
// int shift counts to 0..31, which is always below the 32-bit lane width.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift of int lanes, 128-bit vector.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of int lanes, 64-bit vector.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift of int lanes, 128-bit vector.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift of int lanes, 64-bit vector.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift of int lanes, 128-bit vector.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17969 
// Variable left shift of long lanes, 128-bit vector (SSHL, 2D).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable arithmetic right shift of long lanes: SSHL by the negated
// count (SSHL consumes only the low byte of each lane's shift operand,
// so byte-wise negr on T16B is sufficient).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right shift of long lanes, 128-bit vector.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Immediate shifts of long lanes. No clamping needed: Java masks long
// shift counts to 0..63, always below the 64-bit lane width.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src (LShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of long lanes.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift of long lanes.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src (RShiftCntV shift)));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18055 
// Vector max of 2 floats (64-bit vector): NEON FMAX on 2S lanes.
// Predicate checks the element basic type since MaxV is type-generic.
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector max of 4 floats (128-bit vector): NEON FMAX on 4S lanes.
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector max of 2 doubles (128-bit vector): NEON FMAX on 2D lanes.
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector min of 2 floats (64-bit vector): NEON FMIN on 2S lanes.
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
18111 
18112 instruct vmin4F(vecX dst, vecX src1, vecX src2)
18113 %{
18114   predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
18115   match(Set dst (MinV src1 src2));
18116   ins_cost(INSN_COST);
18117   format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
18118   ins_encode %{
18119     __ fmin(as_FloatRegister($dst$$reg), __ T4S,
18120             as_FloatRegister($src1$$reg),
18121             as_FloatRegister($src2$$reg));
18122   %}
18123   ins_pipe(vdop_fp128);
18124 %}
18125 
18126 instruct vmin2D(vecX dst, vecX src1, vecX src2)
18127 %{
18128   predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
18129   match(Set dst (MinV src1 src2));
18130   ins_cost(INSN_COST);
18131   format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
18132   ins_encode %{
18133     __ fmin(as_FloatRegister($dst$$reg), __ T2D,
18134             as_FloatRegister($src1$$reg),
18135             as_FloatRegister($src2$$reg));
18136   %}
18137   ins_pipe(vdop_fp128);
18138 %}
18139 
// Round each of the 2 double lanes of $src according to the constant
// rounding mode $rmode, selecting the matching AArch64 frint*
// instruction (rint -> FRINTN, floor -> FRINTM, ceil -> FRINTP).
instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (RoundDoubleModeV src rmode));
  ins_cost(INSN_COST);
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        // Round to nearest, ties to even.
        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        // Round towards negative infinity.
        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        // Round towards positive infinity.
        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
                  as_FloatRegister($src$$reg));
        break;
      default:
        // Any other mode is a matcher/IR bug: without this the switch
        // would silently emit nothing and leave $dst unwritten.
        ShouldNotReachHere();
        break;
    }
  %}
  ins_pipe(vdop_fp128);
%}
18162 
// Population count of 4 int lanes: per-byte CNT, then two pairwise
// widening adds (UADDLP) fold the byte counts up to 32-bit lanes.
instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (16B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8H)"
  %}
  ins_encode %{
    __ cnt(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T16B,
              as_FloatRegister($dst$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T8H,
              as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Population count of 2 int lanes (64-bit vector), same cnt + double
// uaddlp folding as the 4I variant but on 8-byte arrangements.
instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{
    "cnt     $dst, $src\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (8B)\n\t"
    "uaddlp  $dst, $dst\t# vector (4H)"
  %}
  ins_encode %{
    __ cnt(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T8B,
              as_FloatRegister($dst$$reg));
    __ uaddlp(as_FloatRegister($dst$$reg), __ T4H,
              as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
18200 
18201 // BEGIN This section of the file is automatically generated. Do not edit --------------
18202 // This section is generated from aarch64_neon_ad.m4
18203 
18204 // ====================VECTOR INSTRUCTIONS==================================
18205 
18206 // ------------------------------ Load/store/reinterpret -----------------------
18207 
18208 // Load vector (16 bits)
// NOTE(review): these rules are in the section generated from
// aarch64_neon_ad.m4 — comment changes here should be mirrored there.

// Load vector (16 bits)
instruct loadV2(vecD dst, memory mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 2);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrh   $dst,$mem\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_ldrvH(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Store Vector (16 bits)
instruct storeV2(vecD src, memory mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 2);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strh   $mem,$src\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_strvH(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// 8-byte -> 8-byte reinterpret: same register, pure no-op.
instruct reinterpretD(vecD dst)
%{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}

// 16-byte -> 16-byte reinterpret: same register, pure no-op.
instruct reinterpretX(vecX dst)
%{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}

// 8-byte -> 16-byte reinterpret: copy the low 64 bits via a T8B ORR
// (register move) when src and dst differ.
instruct reinterpretD2X(vecX dst, vecD src)
%{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    // If register is the same, then move is not needed.
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}

// 16-byte -> 8-byte reinterpret: only the low 64 bits are kept, so a
// T8B ORR move suffices when src and dst differ.
instruct reinterpretX2D(vecD dst, vecX src)
%{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    // If register is the same, then move is not needed.
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}
18291 
18292 // ------------------------------ Vector cast -------------------------------
18293 
// Single-instruction integer lane-width conversions: SXTL widens
// (sign-extend low half), XTN narrows (truncate to low half).

// Widen 4 bytes to 4 shorts.
instruct vcvt4Bto4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\t# convert 4B to 4S vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}

// Widen 8 bytes to 8 shorts.
instruct vcvt8Bto8S(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\t# convert 8B to 8S vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}

// Narrow 4 shorts to 4 bytes.
instruct vcvt4Sto4B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastS2X src));
  format %{ "xtn  $dst, T8B, $src, T8H\t# convert 4S to 4B vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
  %}
  ins_pipe(pipe_class_default);
%}

// Narrow 8 shorts to 8 bytes.
instruct vcvt8Sto8B(vecD dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastS2X src));
  format %{ "xtn  $dst, T8B, $src, T8H\t# convert 8S to 8B vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
  %}
  ins_pipe(pipe_class_default);
%}

// Widen 4 shorts to 4 ints.
instruct vcvt4Sto4I(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastS2X src));
  format %{ "sxtl  $dst, T4S, $src, T4H\t# convert 4S to 4I vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg), __ T4H);
  %}
  ins_pipe(pipe_class_default);
%}

// Narrow 4 ints to 4 shorts.
instruct vcvt4Ito4S(vecD dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorCastI2X src));
  format %{ "xtn  $dst, T4H, $src, T4S\t# convert 4I to 4S vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
  %}
  ins_pipe(pipe_class_default);
%}

// Widen 2 ints to 2 longs.
instruct vcvt2Ito2L(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorCastI2X src));
  format %{ "sxtl  $dst, T2D, $src, T2S\t# convert 2I to 2L vector" %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
  %}
  ins_pipe(pipe_class_default);
%}

// Narrow 2 longs to 2 ints.
instruct vcvt2Lto2I(vecD dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastL2X src));
  format %{ "xtn  $dst, T2S, $src, T2D\t# convert 2L to 2I vector" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
  %}
  ins_pipe(pipe_class_default);
%}
18381 
// Multi-step and integer<->floating-point lane conversions.

// Widen 4 bytes to 4 ints via two sign-extending steps (B->S, S->I).
instruct vcvt4Bto4I(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\n\t"
            "sxtl  $dst, T4S, $dst, T4H\t# convert 4B to 4I vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(pipe_slow);
%}

// Narrow 4 ints to 4 bytes via two truncating steps (I->S, S->B).
instruct vcvt4Ito4B(vecD dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorCastI2X src));
  format %{ "xtn  $dst, T4H, $src, T4S\n\t"
            "xtn  $dst, T8B, $dst, T8H\t# convert 4I to 4B vector"
  %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
  %}
  ins_pipe(pipe_slow);
%}

// 4 bytes -> 4 floats: widen B->S->I, then signed int -> float convert.
instruct vcvt4Bto4F(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastB2X src));
  format %{ "sxtl  $dst, T8H, $src, T8B\n\t"
            "sxtl  $dst, T4S, $dst, T4H\n\t"
            "scvtfv  T4S, $dst, $dst\t# convert 4B to 4F vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// 4 shorts -> 4 floats: widen S->I, then signed int -> float convert.
instruct vcvt4Sto4F(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastS2X src));
  format %{ "sxtl    $dst, T4S, $src, T4H\n\t"
            "scvtfv  T4S, $dst, $dst\t# convert 4S to 4F vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg), __ T4H);
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// 2 ints -> 2 doubles: widen I->L, then signed int -> double convert.
instruct vcvt2Ito2D(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastI2X src));
  format %{ "sxtl    $dst, T2D, $src, T2S\n\t"
            "scvtfv  T2D, $dst, $dst\t# convert 2I to 2D vector"
  %}
  ins_encode %{
    __ sxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// 2 ints -> 2 floats: direct signed int -> float convert.
instruct vcvt2Ito2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastI2X src));
  format %{ "scvtfv  T2S, $dst, $src\t# convert 2I to 2F vector" %}
  ins_encode %{
    __ scvtfv(__ T2S, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 4 ints -> 4 floats: direct signed int -> float convert.
instruct vcvt4Ito4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastI2X src));
  format %{ "scvtfv  T4S, $dst, $src\t# convert 4I to 4F vector" %}
  ins_encode %{
    __ scvtfv(__ T4S, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2 longs -> 2 doubles: direct signed int -> double convert.
instruct vcvt2Lto2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastL2X src));
  format %{ "scvtfv  T2D, $dst, $src\t# convert 2L to 2D vector" %}
  ins_encode %{
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// 2 floats -> 2 doubles: FCVTL widens the FP lanes.
instruct vcvt2Fto2D(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorCastF2X src));
  format %{ "fcvtl  $dst, T2D, $src, T2S\t# convert 2F to 2D vector" %}
  ins_encode %{
    __ fcvtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg), __ T2S);
  %}
  ins_pipe(pipe_class_default);
%}

// 2 doubles -> 2 floats: FCVTN narrows the FP lanes.
instruct vcvt2Dto2F(vecD dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastD2X src));
  format %{ "fcvtn  $dst, T2S, $src, T2D\t# convert 2D to 2F vector" %}
  ins_encode %{
    __ fcvtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
  %}
  ins_pipe(pipe_class_default);
%}

// 2 longs -> 2 floats: convert to 2 doubles, then narrow to 2 floats.
instruct vcvt2Lto2F(vecD dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorCastL2X src));
  format %{ "scvtfv  T2D, $dst, $src\n\t"
            "fcvtn   $dst, T2S, $dst, T2D\t# convert 2L to 2F vector"
  %}
  ins_encode %{
    __ scvtfv(__ T2D, as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
    __ fcvtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($dst$$reg), __ T2D);
  %}
  ins_pipe(pipe_slow);
%}
18522 
18523 // ------------------------------ Reduction -------------------------------
18524 
// Add reductions: ADDV folds all lanes, SMOV extracts lane 0, the
// scalar start value $isrc is added, and the result is re-sign-extended
// to the element width.

// Add-reduce 8 bytes plus scalar start value.
instruct reduce_add8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8B, $vsrc\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $isrc\n\t"
            "sxtb  $dst, $dst\t# add reduction8B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($vsrc$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $isrc$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Add-reduce 16 bytes plus scalar start value.
instruct reduce_add16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T16B, $vsrc\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $isrc\n\t"
            "sxtb  $dst, $dst\t# add reduction16B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($vsrc$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $isrc$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Add-reduce 4 shorts plus scalar start value.
instruct reduce_add4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T4H, $vsrc\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $isrc\n\t"
            "sxth  $dst, $dst\t# add reduction4S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($vsrc$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $isrc$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Add-reduce 8 shorts plus scalar start value.
instruct reduce_add8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8H, $vsrc\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $isrc\n\t"
            "sxth  $dst, $dst\t# add reduction8S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($vsrc$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $isrc$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Add-reduce 2 longs plus scalar start value: pairwise ADDP, extract
// with UMOV, add the scalar.
instruct reduce_add2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, vecX tmp)
%{
  match(Set dst (AddReductionVL isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addpd $tmp, $vsrc\n\t"
            "umov  $dst, $tmp, D, 0\n\t"
            "add   $dst, $isrc, $dst\t# add reduction2L"
  %}
  ins_encode %{
    __ addpd(as_FloatRegister($tmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ D, 0);
    __ add($dst$$Register, $isrc$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
18621 
// Multiply reductions: repeatedly fold the upper half of the vector
// onto the lower half with INS + MULV until two lanes remain, then
// finish with scalar multiplies (re-sign-extending narrow elements).

// Multiply-reduce 8 bytes times scalar start value.
instruct reduce_mul8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD vtmp1, vecD vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, S, $vsrc, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $vsrc\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $isrc\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t# mul reduction8B"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp1$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Multiply-reduce 16 bytes times scalar start value (one extra
// halving step compared to the 8B variant).
instruct reduce_mul16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $vsrc\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp2, $vtmp1\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $isrc\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t# mul reduction16B"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Multiply-reduce 4 shorts times scalar start value.
instruct reduce_mul4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp, TEMP itmp);
  format %{ "ins   $vtmp, S, $vsrc, 0, 1\n\t"
            "mulv  $vtmp, T4H, $vtmp, $vsrc\n\t"
            "umov  $itmp, $vtmp, H, 0\n\t"
            "mulw  $dst, $itmp, $isrc\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t# mul reduction4S"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp$$reg), __ S,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T4H,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($vsrc$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Multiply-reduce 8 shorts times scalar start value.
instruct reduce_mul8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $vsrc, 0, 1\n\t"
            "mulv  $vtmp1, T4H, $vtmp1, $vsrc\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T4H, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, H, 0\n\t"
            "mulw  $dst, $itmp, $isrc\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t# mul reduction8S"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($vsrc$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T4H,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($vsrc$$reg));
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T4H,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $isrc$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Multiply-reduce 2 longs times scalar start value: extract both lanes
// with UMOV and multiply in the general-purpose registers.
instruct reduce_mul2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, iRegLNoSp tmp)
%{
  match(Set dst (MulReductionVL isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, D, 0\n\t"
            "mul   $dst, $isrc, $tmp\n\t"
            "umov  $tmp, $vsrc, D, 1\n\t"
            "mul   $dst, $dst, $tmp\t# mul reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ mul($dst$$Register, $isrc$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ mul($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
18784 
18785 instruct reduce_max8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD tmp, rFlagsReg cr)
18786 %{
18787   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
18788   match(Set dst (MaxReductionV isrc vsrc));
18789   ins_cost(INSN_COST);
18790   effect(TEMP_DEF dst, TEMP tmp, KILL cr);
18791   format %{ "smaxv $tmp, T8B, $vsrc\n\t"
18792             "smov  $dst, $tmp, B, 0\n\t"
18793             "cmpw  $dst, $isrc\n\t"
18794             "cselw $dst, $dst, $isrc GT\t# max reduction8B"
18795   %}
18796   ins_encode %{
18797     __ smaxv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($vsrc$$reg));
18798     __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ B, 0);
18799     __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
18800     __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::GT);
18801   %}
18802   ins_pipe(pipe_slow);
18803 %}
18804 
// Signed max reduction of 16 byte lanes (vecX) folded with scalar isrc.
// Same scheme as reduce_max8B but over the full 128-bit register (T16B):
// smaxv -> smov (sign-extending B-lane move) -> cmpw/cselw against isrc.
instruct reduce_max16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T16B, $vsrc\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc GT\t# max reduction16B"
  %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_slow);
%}
18824 
// Signed max reduction of 4 short lanes (vecD, arranged as T4H) folded with
// scalar isrc. smaxv reduces to tmp lane H[0]; smov sign-extends the halfword
// into dst; cmpw/cselw select max(dst, isrc). Flags clobbered (KILL cr).
instruct reduce_max4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4H, $vsrc\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc GT\t# max reduction4S"
  %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_slow);
%}
18844 
// Signed max reduction of 8 short lanes (vecX, arranged as T8H) folded with
// scalar isrc. Same scheme as reduce_max4S over the full 128-bit register.
instruct reduce_max8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T8H, $vsrc\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc GT\t# max reduction8S"
  %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_slow);
%}
18864 
// Signed max reduction of 4 int lanes (vecX, T4S) folded with scalar isrc.
// Unlike the byte/short variants, the S-lane extract uses umov: the lane is
// already full 32-bit width, so no sign extension is required.
instruct reduce_max4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4S, $vsrc\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc GT\t# max reduction4I"
  %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ umov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_slow);
%}
18884 
// Signed min reduction of 8 byte lanes (vecD) folded with scalar isrc.
// Mirror of reduce_max8B: sminv reduces to tmp lane B[0]; smov sign-extends
// the byte into dst; cselw with LT selects min(dst, isrc).
instruct reduce_min8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8B, $vsrc\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc LT\t# min reduction8B"
  %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
18904 
// Signed min reduction of 16 byte lanes (vecX, T16B) folded with scalar isrc.
// Same scheme as reduce_min8B over the full 128-bit register.
instruct reduce_min16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T16B, $vsrc\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc LT\t# min reduction16B"
  %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
18924 
// Signed min reduction of 4 short lanes (vecD, T4H) folded with scalar isrc.
// sminv -> smov (sign-extending H-lane move) -> cmpw/cselw(LT) against isrc.
instruct reduce_min4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecD tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4H, $vsrc\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc LT\t# min reduction4S"
  %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
18944 
// Signed min reduction of 8 short lanes (vecX, T8H) folded with scalar isrc.
// Same scheme as reduce_min4S over the full 128-bit register.
instruct reduce_min8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8H, $vsrc\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc LT\t# min reduction8S"
  %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($vsrc$$reg));
    __ smov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
18964 
// Signed min reduction of 4 int lanes (vecX, T4S) folded with scalar isrc.
// umov suffices for the S-lane extract (full 32-bit width, no extension).
instruct reduce_min4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4S, $vsrc\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc LT\t# min reduction4I"
  %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($vsrc$$reg));
    __ umov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
18984 
// Signed max reduction of 2 int lanes (vecD) folded with scalar isrc.
// There is no 2-lane form of smaxv, so dup T2D replicates the 64-bit source
// into both halves of a 128-bit tmp; smaxv T4S over the 4 lanes (each original
// lane appears twice) then yields the max of the 2 original lanes.
instruct reduce_max2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $vsrc\n\t"
            "smaxv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc GT\t# max reduction2I"
  %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($vsrc$$reg));
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_slow);
%}
19006 
// Signed min reduction of 2 int lanes (vecD) folded with scalar isrc.
// Mirror of reduce_max2I: dup T2D duplicates the 64-bit source so the 4-lane
// sminv sees each original lane twice, giving the min of the 2 lanes.
instruct reduce_min2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, vecX tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $vsrc\n\t"
            "sminv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $isrc\n\t"
            "cselw $dst, $dst, $isrc LT\t# min reduction2I"
  %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($vsrc$$reg));
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov(as_Register($dst$$reg), as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($isrc$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($isrc$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
19028 
// Signed max reduction of 2 long lanes (vecX) folded with scalar isrc.
// NEON smaxv has no 64-bit element form, so the reduction is done entirely
// with scalar compares: extract each D lane to a GPR and fold via cmp/csel.
// Uses a GPR tmp (iRegLNoSp), not a vector temp. Flags clobbered (KILL cr).
instruct reduce_max2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, iRegLNoSp tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $vsrc, D, 0\n\t"
            "cmp   $isrc,$tmp\n\t"
            "csel  $dst, $isrc, $tmp GT\n\t"
            "umov  $tmp, $vsrc, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp GT\t# max reduction2L"
  %}
  ins_encode %{
    // dst = max(isrc, lane 0)
    __ umov(as_Register($tmp$$reg), as_FloatRegister($vsrc$$reg), __ D, 0);
    __ cmp(as_Register($isrc$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($isrc$$reg), as_Register($tmp$$reg), Assembler::GT);
    // dst = max(dst, lane 1)
    __ umov(as_Register($tmp$$reg), as_FloatRegister($vsrc$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_slow);
%}
19052 
// Signed min reduction of 2 long lanes (vecX) folded with scalar isrc.
// Mirror of reduce_max2L (no 64-bit sminv in NEON): extract each D lane to a
// GPR and fold with cmp/csel using the LT condition.
instruct reduce_min2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, iRegLNoSp tmp, rFlagsReg cr)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $vsrc, D, 0\n\t"
            "cmp   $isrc,$tmp\n\t"
            "csel  $dst, $isrc, $tmp LT\n\t"
            "umov  $tmp, $vsrc, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp LT\t# min reduction2L"
  %}
  ins_encode %{
    // dst = min(isrc, lane 0)
    __ umov(as_Register($tmp$$reg), as_FloatRegister($vsrc$$reg), __ D, 0);
    __ cmp(as_Register($isrc$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($isrc$$reg), as_Register($tmp$$reg), Assembler::LT);
    // dst = min(dst, lane 1)
    __ umov(as_Register($tmp$$reg), as_FloatRegister($vsrc$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_slow);
%}
19076 
// AND reduction of 8 byte lanes (vecD) folded with scalar isrc.
// The 64-bit vector is extracted as two 32-bit halves (S lanes 0 and 1) and
// folded in GPRs: AND the halves, then fold 16-bit and 8-bit halves by AND-ing
// dst with itself shifted right, combine with isrc, and sxtb sign-extends the
// resulting low byte back to a canonical int.
instruct reduce_and8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, S, 0\n\t"
            "umov   $dst, $vsrc, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $isrc, $dst\n\t"
            "sxtb   $dst, $dst\t# and reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    // fold 32 -> 16 -> 8 bits via shifted-operand ANDs
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19102 
// OR reduction of 8 byte lanes (vecD) folded with scalar isrc.
// Same shift-fold scheme as reduce_and8B with orrw instead of andw:
// fold the two 32-bit halves, then 16- and 8-bit halves, combine with isrc,
// and sxtb sign-extends the final byte.
instruct reduce_orr8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, S, 0\n\t"
            "umov   $dst, $vsrc, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $isrc, $dst\n\t"
            "sxtb   $dst, $dst\t# orr reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    // fold 32 -> 16 -> 8 bits via shifted-operand ORs
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19128 
// XOR reduction of 8 byte lanes (vecD) folded with scalar isrc.
// Same shift-fold scheme as reduce_and8B with eorw: fold the two 32-bit
// halves, then 16- and 8-bit halves, combine with isrc, sxtb the final byte.
instruct reduce_eor8B(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, S, 0\n\t"
            "umov   $dst, $vsrc, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $isrc, $dst\n\t"
            "sxtb   $dst, $dst\t# eor reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    // fold 32 -> 16 -> 8 bits via shifted-operand XORs
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19154 
// AND reduction of 16 byte lanes (vecX) folded with scalar isrc.
// Extracts the two 64-bit D lanes, folds them with 64-bit andr (including an
// LSR #32 fold down to 32 bits), then continues with 32-bit andw folds at 16
// and 8 bits, combines with isrc, and sxtb sign-extends the final byte.
instruct reduce_and16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $isrc, $dst\n\t"
            "sxtb   $dst, $dst\t# and reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    // 64-bit folds: lanes, then high/low 32 bits
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    // 32-bit folds down to a single byte
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19182 
// OR reduction of 16 byte lanes (vecX) folded with scalar isrc.
// Same scheme as reduce_and16B with orr/orrw: 64-bit folds of the two D
// lanes, then 32-bit shift-folds down to one byte, combine with isrc, sxtb.
instruct reduce_orr16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $isrc, $dst\n\t"
            "sxtb   $dst, $dst\t# orr reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    // 64-bit folds, then 32-bit shift-folds down to a byte
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19210 
// XOR reduction of 16 byte lanes (vecX) folded with scalar isrc.
// Same scheme as reduce_and16B with eor/eorw: 64-bit folds of the two D
// lanes, then 32-bit shift-folds down to one byte, combine with isrc, sxtb.
instruct reduce_eor16B(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $isrc, $dst\n\t"
            "sxtb   $dst, $dst\t# eor reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    // 64-bit folds, then 32-bit shift-folds down to a byte
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19238 
// AND reduction of 4 short lanes (vecD) folded with scalar isrc.
// Extract the two 32-bit halves, AND them, fold the 16-bit halves with an
// LSR #16 shifted AND, combine with isrc, and sxth sign-extends the final
// halfword.
instruct reduce_and4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, S, 0\n\t"
            "umov   $dst, $vsrc, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $isrc, $dst\n\t"
            "sxth   $dst, $dst\t# and reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19262 
// OR reduction of 4 short lanes (vecD) folded with scalar isrc.
// Same shift-fold scheme as reduce_and4S with orrw; sxth sign-extends the
// final halfword.
instruct reduce_orr4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, S, 0\n\t"
            "umov   $dst, $vsrc, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $isrc, $dst\n\t"
            "sxth   $dst, $dst\t# orr reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19286 
// XOR reduction of 4 short lanes (vecD) folded with scalar isrc.
// Same shift-fold scheme as reduce_and4S with eorw; sxth sign-extends the
// final halfword.
instruct reduce_eor4S(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, S, 0\n\t"
            "umov   $dst, $vsrc, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $isrc, $dst\n\t"
            "sxth   $dst, $dst\t# eor reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19310 
// AND reduction of 8 short lanes (vecX) folded with scalar isrc.
// Extract the two 64-bit D lanes, fold with 64-bit andr (including LSR #32),
// then a 32-bit andw fold at 16 bits, combine with isrc, sxth at the end.
instruct reduce_and8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $isrc, $dst\n\t"
            "sxth   $dst, $dst\t# and reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    // 64-bit folds, then 32-bit fold down to a halfword
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19336 
// OR reduction of 8 short lanes (vecX) folded with scalar isrc.
// Same scheme as reduce_and8S with orr/orrw; sxth sign-extends the result.
instruct reduce_orr8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $isrc, $dst\n\t"
            "sxth   $dst, $dst\t# orr reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    // 64-bit folds, then 32-bit fold down to a halfword
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19362 
// XOR reduction of 8 short lanes (vecX) folded with scalar isrc.
// Same scheme as reduce_and8S with eor/eorw; sxth sign-extends the result.
instruct reduce_eor8S(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $isrc, $dst\n\t"
            "sxth   $dst, $dst\t# eor reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    // 64-bit folds, then 32-bit fold down to a halfword
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $isrc$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19388 
// AND reduction of 2 int lanes (vecD) folded with scalar isrc:
// dst = vsrc[0] & isrc & vsrc[1]. Each 32-bit S lane is extracted with umov
// and combined in GPRs; no final extension is needed for int elements.
instruct reduce_and2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "andw  $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "andw  $dst, $tmp, $dst\t# and reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ andw($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ andw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19408 
// OR reduction of 2 int lanes (vecD) folded with scalar isrc:
// dst = vsrc[0] | isrc | vsrc[1]. Same lane-extract scheme as reduce_and2I.
instruct reduce_orr2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "orrw  $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "orrw  $dst, $tmp, $dst\t# orr reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ orrw($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ orrw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19428 
// XOR reduction of 2 int lanes (vecD) folded with scalar isrc:
// dst = vsrc[0] ^ isrc ^ vsrc[1]. Same lane-extract scheme as reduce_and2I.
instruct reduce_eor2I(iRegINoSp dst, iRegIorL2I isrc, vecD vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, S, 0\n\t"
            "eorw  $dst, $tmp, $isrc\n\t"
            "umov  $tmp, $vsrc, S, 1\n\t"
            "eorw  $dst, $tmp, $dst\t# eor reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 0);
    __ eorw($dst$$Register, $tmp$$Register, $isrc$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ S, 1);
    __ eorw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19448 
// AND reduction of 4 int lanes (vecX) folded with scalar isrc.
// Extract the two 64-bit D lanes, AND them with 64-bit andr, fold the 32-bit
// halves with an LSR #32 shifted AND, then combine with isrc. No extension is
// needed — the final andw already yields a proper 32-bit int.
instruct reduce_and4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $isrc, $dst\t# and reduction4I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $isrc$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19470 
// OR reduction of 4 int lanes (vecX) folded with scalar isrc.
// Same scheme as reduce_and4I with orr/orrw.
instruct reduce_orr4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $isrc, $dst\t# orr reduction4I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $isrc$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19492 
// XOR-reduce a 4-lane int vector (128-bit vecX) with a scalar accumulator:
// dst = isrc ^ vsrc[0] ^ ... ^ vsrc[3].  Same D-half/shift folding scheme
// as reduce_and4I, using eor instead of and.
instruct reduce_eor4I(iRegINoSp dst, iRegIorL2I isrc, vecX vsrc, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $vsrc, D, 0\n\t"
            "umov   $dst, $vsrc, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $isrc, $dst\t# eor reduction4I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor ($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $isrc$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19514 
// AND-reduce a 2-lane long vector (128-bit vecX) with a scalar accumulator:
// dst = isrc & vsrc[0] & vsrc[1].  Each D lane is moved to a GP register
// with umov and folded in with a 64-bit and.  tmp is clobbered.
instruct reduce_and2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (AndReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, D, 0\n\t"
            "andr  $dst, $isrc, $tmp\n\t"
            "umov  $tmp, $vsrc, D, 1\n\t"
            "andr  $dst, $dst, $tmp\t# and reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ andr($dst$$Register, $isrc$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19534 
// OR-reduce a 2-lane long vector (128-bit vecX) with a scalar accumulator:
// dst = isrc | vsrc[0] | vsrc[1].  Same lane-by-lane scheme as
// reduce_and2L, using orr instead of and.
instruct reduce_orr2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (OrReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, D, 0\n\t"
            "orr   $dst, $isrc, $tmp\n\t"
            "umov  $tmp, $vsrc, D, 1\n\t"
            "orr   $dst, $dst, $tmp\t# orr reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ orr ($dst$$Register, $isrc$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ orr ($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19554 
// XOR-reduce a 2-lane long vector (128-bit vecX) with a scalar accumulator:
// dst = isrc ^ vsrc[0] ^ vsrc[1].  Same lane-by-lane scheme as
// reduce_and2L, using eor instead of and.
instruct reduce_eor2L(iRegLNoSp dst, iRegL isrc, vecX vsrc, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (XorReductionV isrc vsrc));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $vsrc, D, 0\n\t"
            "eor   $dst, $isrc, $tmp\n\t"
            "umov  $tmp, $vsrc, D, 1\n\t"
            "eor   $dst, $dst, $tmp\t# eor reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 0);
    __ eor ($dst$$Register, $isrc$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($vsrc$$reg), __ D, 1);
    __ eor ($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19574 
19575 // ------------------------------ Vector insert ---------------------------------
19576 
// Insert a GP-register byte value into lane $idx of an 8-byte (64-bit)
// vector.  Copies src into dst first (orr with itself is a vector move),
// skipped when the register allocator assigned dst == src, then
// overwrites the selected B lane with $val.
instruct insert8B(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T8B, $idx, $val\t# insert into vector(8B)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19593 
// Insert a GP-register byte value into lane $idx of a 16-byte (128-bit)
// vector.  Same copy-then-overwrite-lane scheme as insert8B.
instruct insert16B(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T16B, $idx, $val\t# insert into vector(16B)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19610 
// Insert a GP-register short value into lane $idx of a 4-short (64-bit)
// vector.  The register move uses T8B (arrangement is irrelevant for a
// plain copy); the lane write uses T4H for 16-bit elements.
instruct insert4S(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T4H, $idx, $val\t# insert into vector(4S)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19627 
// Insert a GP-register short value into lane $idx of an 8-short (128-bit)
// vector.  Copy via T16B orr; lane write uses T8H for 16-bit elements.
instruct insert8S(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T8H, $idx, $val\t# insert into vector(8S)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19644 
// Insert a GP-register int value into lane $idx of a 2-int (64-bit)
// vector.  Copy via T8B orr; lane write uses T2S for 32-bit elements.
instruct insert2I(vecD dst, vecD src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "mov    $dst, T2S, $idx, $val\t# insert into vector(2I)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19661 
// Insert a GP-register int value into lane $idx of a 4-int (128-bit)
// vector.  Copy via T16B orr; lane write uses T4S for 32-bit elements.
instruct insert4I(vecX dst, vecX src, iRegIorL2I val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T4S, $idx, $val\t# insert into vector(4I)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19678 
// Insert a GP-register long value into lane $idx of a 2-long (128-bit)
// vector.  Copy via T16B orr; lane write uses T2D for 64-bit elements.
instruct insert2L(vecX dst, vecX src, iRegL val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "mov    $dst, T2D, $idx, $val\t# insert into vector(2L)" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    }
    __ mov(as_FloatRegister($dst$$reg), __ T2D, $idx$$constant, $val$$Register);
  %}
  ins_pipe(pipe_slow);
%}
19695 
// Insert an FP-register float value into lane $idx of a 2-float (64-bit)
// vector.  Because $val lives in the FP file, dst must not alias src or
// val (TEMP_DEF dst), so the copy is unconditional; ins then moves lane 0
// of $val into lane $idx of dst.
instruct insert2F(vecD dst, vecD src, vRegF val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T8B, $src, $src\n\t"
            "ins    $dst, S, $val, $idx, 0\t# insert into vector(2F)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_slow);
%}
19712 
// Insert an FP-register float value into lane $idx of a 4-float (128-bit)
// vector.  Same scheme as insert2F with a T16B copy and an S-lane ins.
instruct insert4F(vecX dst, vecX src, vRegF val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "ins    $dst, S, $val, $idx, 0\t# insert into vector(4F)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_slow);
%}
19729 
// Insert an FP-register double value into lane $idx of a 2-double
// (128-bit) vector.  Same scheme as insert2F with a T16B copy and a
// D-lane ins.
instruct insert2D(vecX dst, vecX src, vRegD val, immI idx)
%{
  predicate(n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorInsert (Binary src val) idx));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "orr    $dst, T16B, $src, $src\n\t"
            "ins    $dst, D, $val, $idx, 0\t# insert into vector(2D)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
    __ ins(as_FloatRegister($dst$$reg), __ D,
           as_FloatRegister($val$$reg), $idx$$constant, 0);
  %}
  ins_pipe(pipe_slow);
%}
19746 
19747 // ------------------------------ Vector extract ---------------------------------
19748 
// Extract byte lane $idx of an 8B vector into a GP register,
// sign-extended (smov) to an int.
instruct extract8B(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 8);
  match(Set dst (ExtractB src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, B, $idx\t# extract from vector(8B)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ B, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19760 
// Extract byte lane $idx of a 16B vector into a GP register,
// sign-extended (smov) to an int.
instruct extract16B(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 16);
  match(Set dst (ExtractB src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, B, $idx\t# extract from vector(16B)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ B, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19772 
// Extract short lane $idx of a 4S vector into a GP register,
// sign-extended (smov) to an int.
instruct extract4S(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractS src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, H, $idx\t# extract from vector(4S)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ H, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19784 
// Extract short lane $idx of an 8S vector into a GP register,
// sign-extended (smov) to an int.
instruct extract8S(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 8);
  match(Set dst (ExtractS src idx));
  ins_cost(INSN_COST);
  format %{ "smov    $dst, $src, H, $idx\t# extract from vector(8S)" %}
  ins_encode %{
    __ smov($dst$$Register, as_FloatRegister($src$$reg), __ H, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19796 
// Extract int lane $idx of a 2I vector into a GP register.  umov moves
// the full 32-bit lane; no sign extension is needed for an int result.
instruct extract2I(iRegINoSp dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractI src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, S, $idx\t# extract from vector(2I)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ S, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19808 
// Extract int lane $idx of a 4I vector into a GP register (umov, full
// 32-bit lane, no extension needed).
instruct extract4I(iRegINoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractI src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, S, $idx\t# extract from vector(4I)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ S, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19820 
// Extract long lane $idx of a 2L vector into a GP register (umov, full
// 64-bit D lane).
instruct extract2L(iRegLNoSp dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractL src idx));
  ins_cost(INSN_COST);
  format %{ "umov    $dst, $src, D, $idx\t# extract from vector(2L)" %}
  ins_encode %{
    __ umov($dst$$Register, as_FloatRegister($src$$reg), __ D, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19832 
// Extract float lane $idx of a 2F vector into a scalar FP register:
// ins copies S lane $idx of src into lane 0 of dst, which is where a
// scalar float lives.
instruct extract2F(vRegF dst, vecD src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractF src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, S, $src, 0, $idx\t# extract from vector(2F)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19845 
// Extract float lane $idx of a 4F vector into a scalar FP register
// (ins src-lane $idx -> dst lane 0), same scheme as extract2F.
instruct extract4F(vRegF dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 4);
  match(Set dst (ExtractF src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, S, $src, 0, $idx\t# extract from vector(4F)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ S,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19858 
// Extract double lane $idx of a 2D vector into a scalar FP register
// (ins src D-lane $idx -> dst lane 0).
instruct extract2D(vRegD dst, vecX src, immI idx)
%{
  predicate(n->in(1)->bottom_type()->is_vect()->length() == 2);
  match(Set dst (ExtractD src idx));
  ins_cost(INSN_COST);
  format %{ "ins   $dst, D, $src, 0, $idx\t# extract from vector(2D)" %}
  ins_encode %{
    __ ins(as_FloatRegister($dst$$reg), __ D,
           as_FloatRegister($src$$reg), 0, $idx$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
19871 
19872 // ------------------------------ Vector comparison ---------------------------------
19873 
// Vector compare-equal, 8 byte lanes (64-bit): each dst lane is set to
// all-ones where src1 == src2, else all-zeros.  The $cond operand is
// fixed by the predicate (BoolTest::eq) and is not used in the encoding.
instruct vcmeq8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19888 
// Vector compare-equal, 16 byte lanes (128-bit); lane mask semantics as
// in vcmeq8B.
instruct vcmeq16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19903 
// Vector compare-equal, 4 short lanes (64-bit, T4H arrangement); lane
// mask semantics as in vcmeq8B.
instruct vcmeq4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19918 
// Vector compare-equal, 8 short lanes (128-bit, T8H arrangement); lane
// mask semantics as in vcmeq8B.
instruct vcmeq8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19933 
// Vector compare-equal, 2 int lanes (64-bit, T2S arrangement); lane
// mask semantics as in vcmeq8B.
instruct vcmeq2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19948 
// Vector compare-equal, 4 int lanes (128-bit, T4S arrangement); lane
// mask semantics as in vcmeq8B.
instruct vcmeq4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19963 
// Vector compare-equal, 2 long lanes (128-bit, T2D arrangement); lane
// mask semantics as in vcmeq8B.
instruct vcmeq2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
19978 
// Vector floating-point compare-equal, 2 float lanes (64-bit): fcmeq
// sets each lane mask to all-ones where src1 == src2, else all-zeros.
instruct vcmeq2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst, $src1, $src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
19993 
// Vector floating-point compare-equal, 4 float lanes (128-bit); lane
// mask semantics as in vcmeq2F.
instruct vcmeq4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst, $src1, $src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20008 
// Vector floating-point compare-equal, 2 double lanes (128-bit); lane
// mask semantics as in vcmeq2F.
instruct vcmeq2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst, $src1, $src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20023 
// Vector signed compare greater-than, 8 byte lanes (64-bit): each dst
// lane is all-ones where src1 > src2 (signed), else all-zeros.  $cond is
// fixed by the predicate (BoolTest::gt) and not used in the encoding.
instruct vcmgt8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20038 
// Vector signed compare greater-than, 16 byte lanes (128-bit); lane
// mask semantics as in vcmgt8B.
instruct vcmgt16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20053 
// Vector signed compare greater-than, 4 short lanes (64-bit, T4H); lane
// mask semantics as in vcmgt8B.
instruct vcmgt4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20068 
// Vector signed compare greater-than, 8 short lanes (128-bit, T8H);
// lane mask semantics as in vcmgt8B.
instruct vcmgt8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20083 
// Vector signed compare greater-than, 2 int lanes (64-bit, T2S); lane
// mask semantics as in vcmgt8B.
instruct vcmgt2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20098 
// Vector signed compare greater-than, 4 int lanes (128-bit, T4S); lane
// mask semantics as in vcmgt8B.
instruct vcmgt4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20113 
// Vector signed compare greater-than, 2 long lanes (128-bit, T2D); lane
// mask semantics as in vcmgt8B.
instruct vcmgt2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src1, $src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20128 
// Vector floating-point compare greater-than, 2 float lanes (64-bit):
// fcmgt sets each lane mask to all-ones where src1 > src2, else zeros.
instruct vcmgt2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst, $src1, $src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20143 
// Vector floating-point compare greater-than, 4 float lanes (128-bit);
// lane mask semantics as in vcmgt2F.
instruct vcmgt4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst, $src1, $src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20158 
// Vector floating-point compare greater-than, 2 double lanes (128-bit);
// lane mask semantics as in vcmgt2F.
instruct vcmgt2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst, $src1, $src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20173 
// Vector signed compare greater-than-or-equal, 8 byte lanes (64-bit):
// each dst lane is all-ones where src1 >= src2 (signed), else all-zeros.
// $cond is fixed by the predicate (BoolTest::ge) and not used in encoding.
instruct vcmge8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src1, $src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20188 
// Vector signed compare greater-than-or-equal, 16 byte lanes (128-bit);
// lane mask semantics as in vcmge8B.
instruct vcmge16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src1, $src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20203 
// Vector signed compare greater-than-or-equal, 4 short lanes (64-bit,
// T4H); lane mask semantics as in vcmge8B.
instruct vcmge4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src1, $src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20218 
20219 instruct vcmge8S(vecX dst, vecX src1, vecX src2, immI cond)
20220 %{
20221   predicate(n->as_Vector()->length() == 8 &&
20222             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20223             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
20224   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20225   format %{ "cmge  $dst, $src1, $src2\t# vector cmp (8S)" %}
20226   ins_cost(INSN_COST);
20227   ins_encode %{
20228     __ cmge(as_FloatRegister($dst$$reg), __ T8H,
20229             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20230   %}
20231   ins_pipe(vdop128);
20232 %}
20233 
20234 instruct vcmge2I(vecD dst, vecD src1, vecD src2, immI cond)
20235 %{
20236   predicate(n->as_Vector()->length() == 2 &&
20237             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20238             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
20239   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20240   format %{ "cmge  $dst, $src1, $src2\t# vector cmp (2I)" %}
20241   ins_cost(INSN_COST);
20242   ins_encode %{
20243     __ cmge(as_FloatRegister($dst$$reg), __ T2S,
20244             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20245   %}
20246   ins_pipe(vdop64);
20247 %}
20248 
20249 instruct vcmge4I(vecX dst, vecX src1, vecX src2, immI cond)
20250 %{
20251   predicate(n->as_Vector()->length() == 4 &&
20252             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20253             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
20254   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20255   format %{ "cmge  $dst, $src1, $src2\t# vector cmp (4I)" %}
20256   ins_cost(INSN_COST);
20257   ins_encode %{
20258     __ cmge(as_FloatRegister($dst$$reg), __ T4S,
20259             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20260   %}
20261   ins_pipe(vdop128);
20262 %}
20263 
20264 instruct vcmge2L(vecX dst, vecX src1, vecX src2, immI cond)
20265 %{
20266   predicate(n->as_Vector()->length() == 2 &&
20267             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20268             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
20269   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20270   format %{ "cmge  $dst, $src1, $src2\t# vector cmp (2L)" %}
20271   ins_cost(INSN_COST);
20272   ins_encode %{
20273     __ cmge(as_FloatRegister($dst$$reg), __ T2D,
20274             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20275   %}
20276   ins_pipe(vdop128);
20277 %}
20278 
20279 instruct vcmge2F(vecD dst, vecD src1, vecD src2, immI cond)
20280 %{
20281   predicate(n->as_Vector()->length() == 2 &&
20282             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20283             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
20284   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20285   format %{ "fcmge  $dst, $src1, $src2\t# vector cmp (2F)" %}
20286   ins_cost(INSN_COST);
20287   ins_encode %{
20288     __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
20289             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20290   %}
20291   ins_pipe(vdop64);
20292 %}
20293 
20294 instruct vcmge4F(vecX dst, vecX src1, vecX src2, immI cond)
20295 %{
20296   predicate(n->as_Vector()->length() == 4 &&
20297             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20298             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
20299   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20300   format %{ "fcmge  $dst, $src1, $src2\t# vector cmp (4F)" %}
20301   ins_cost(INSN_COST);
20302   ins_encode %{
20303     __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
20304             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20305   %}
20306   ins_pipe(vdop128);
20307 %}
20308 
20309 instruct vcmge2D(vecX dst, vecX src1, vecX src2, immI cond)
20310 %{
20311   predicate(n->as_Vector()->length() == 2 &&
20312             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
20313             n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
20314   match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
20315   format %{ "fcmge  $dst, $src1, $src2\t# vector cmp (2D)" %}
20316   ins_cost(INSN_COST);
20317   ins_encode %{
20318     __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
20319             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
20320   %}
20321   ins_pipe(vdop128);
20322 %}
20323 
// -----------------------------------------------------------------------------
// VectorMaskCmp (BoolTest::ne) rules.  NEON has no compare-not-equal, so each
// rule builds the equality mask with (f)cmeq and then inverts it with notr.
// The bitwise not always operates on the full 64/128-bit register (T8B/T16B)
// regardless of element size, since the mask is all-ones/all-zeros per lane.
// Format fix: the "\n\t" separator is placed after the "#" comment text so
// the two emitted instructions print on separate lines (previously the
// comment and the "not" ran together on one line).
// -----------------------------------------------------------------------------

// ne compare, 8 byte lanes, 64-bit vector
instruct vcmne8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (8B)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 16 byte lanes, 128-bit vector
instruct vcmne16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (16B)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 4 short lanes (T4H), 64-bit vector
instruct vcmne4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (4S)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 8 short lanes (T8H), 128-bit vector
instruct vcmne8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (8S)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 2 int lanes, 64-bit vector
instruct vcmne2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (2I)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 4 int lanes, 128-bit vector
instruct vcmne4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (4I)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 2 long lanes, 128-bit vector
instruct vcmne2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmeq  $dst, $src1, $src2\t# vector cmp (2L)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 2 float lanes, 64-bit vector (fcmeq + not)
instruct vcmne2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst, $src1, $src2\t# vector cmp (2F)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 4 float lanes, 128-bit vector (fcmeq + not)
instruct vcmne4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst, $src1, $src2\t# vector cmp (4F)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}

// ne compare, 2 double lanes, 128-bit vector (fcmeq + not)
instruct vcmne2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmeq  $dst, $src1, $src2\t# vector cmp (2D)\n\t"
            "not   $dst, $dst" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
20493 
// -----------------------------------------------------------------------------
// VectorMaskCmp (BoolTest::lt) rules.  NEON has no compare-less-than between
// two registers, so lt is synthesized as gt with the operands swapped:
// src1 < src2  ==  src2 > src1 — note $src2 precedes $src1 in every encoding.
// -----------------------------------------------------------------------------

// lt compare, 8 byte lanes, 64-bit vector (cmgt with swapped operands)
instruct vcmlt8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// lt compare, 16 byte lanes, 128-bit vector
instruct vcmlt16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// lt compare, 4 short lanes (T4H), 64-bit vector
instruct vcmlt4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// lt compare, 8 short lanes (T8H), 128-bit vector
instruct vcmlt8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// lt compare, 2 int lanes, 64-bit vector
instruct vcmlt2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// lt compare, 4 int lanes, 128-bit vector
instruct vcmlt4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// lt compare, 2 long lanes, 128-bit vector
instruct vcmlt2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmgt  $dst, $src2, $src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// lt compare, 2 float lanes, 64-bit vector (fcmgt with swapped operands)
instruct vcmlt2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst, $src2, $src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// lt compare, 4 float lanes, 128-bit vector
instruct vcmlt4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst, $src2, $src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// lt compare, 2 double lanes, 128-bit vector
instruct vcmlt2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmgt  $dst, $src2, $src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
20643 
// -----------------------------------------------------------------------------
// VectorMaskCmp (BoolTest::le) rules.  As with lt, le is synthesized by
// swapping the operands of the ge compare:
// src1 <= src2  ==  src2 >= src1 — note $src2 precedes $src1 in every encoding.
// -----------------------------------------------------------------------------

// le compare, 8 byte lanes, 64-bit vector (cmge with swapped operands)
instruct vcmle8B(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// le compare, 16 byte lanes, 128-bit vector
instruct vcmle16B(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// le compare, 4 short lanes (T4H), 64-bit vector
instruct vcmle4S(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// le compare, 8 short lanes (T8H), 128-bit vector
instruct vcmle8S(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// le compare, 2 int lanes, 64-bit vector
instruct vcmle2I(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// le compare, 4 int lanes, 128-bit vector
instruct vcmle4I(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// le compare, 2 long lanes, 128-bit vector
instruct vcmle2L(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "cmge  $dst, $src2, $src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// le compare, 2 float lanes, 64-bit vector (fcmge with swapped operands)
instruct vcmle2F(vecD dst, vecD src1, vecD src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst, $src2, $src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

// le compare, 4 float lanes, 128-bit vector
instruct vcmle4F(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst, $src2, $src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

// le compare, 2 double lanes, 128-bit vector
instruct vcmle2D(vecX dst, vecX src1, vecX src2, immI cond)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp (Binary src1 src2) cond));
  format %{ "fcmge  $dst, $src2, $src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
20793 
20794 // ------------------------------ Vector mul -----------------------------------
20795 
// Multiply two 2-element long vectors.  NEON provides no 64-bit-element
// multiply, so each lane is extracted to general registers with umov,
// multiplied with the scalar mul, and inserted back into $dst with mov.
// tmp1/tmp2 are TEMP general registers clobbered by the sequence.
instruct vmul2L(vecX dst, vecX src1, vecX src2, iRegLNoSp tmp1, iRegLNoSp tmp2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp1, TEMP tmp2);
  format %{ "umov   $tmp1, $src1, D, 0\n\t"
            "umov   $tmp2, $src2, D, 0\n\t"
            "mul    $tmp2, $tmp2, $tmp1\n\t"
            "mov    $dst,  T2D,   0, $tmp2\t# insert into vector(2L)\n\t"
            "umov   $tmp1, $src1, D, 1\n\t"
            "umov   $tmp2, $src2, D, 1\n\t"
            "mul    $tmp2, $tmp2, $tmp1\n\t"
            "mov    $dst,  T2D,   1, $tmp2\t# insert into vector(2L)\n\t"
  %}
  ins_encode %{
    // Lane 0: extract, multiply in GPRs, insert.
    __ umov($tmp1$$Register, as_FloatRegister($src1$$reg), __ D, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ mul(as_Register($tmp2$$reg), as_Register($tmp2$$reg), as_Register($tmp1$$reg));
    __ mov(as_FloatRegister($dst$$reg), __ T2D, 0, $tmp2$$Register);
    // Lane 1: same sequence for the upper doubleword.
    __ umov($tmp1$$Register, as_FloatRegister($src1$$reg), __ D, 1);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ mul(as_Register($tmp2$$reg), as_Register($tmp2$$reg), as_Register($tmp1$$reg));
    __ mov(as_FloatRegister($dst$$reg), __ T2D, 1, $tmp2$$Register);
  %}
  ins_pipe(pipe_slow);
%}
20823 
20824 // --------------------------------- Vector not --------------------------------
20825 
// Vector bitwise not, matched from "XorV src (Replicate -1)".  The not is a
// pure bitwise operation, so one 64-bit rule covers all 8-byte vectors
// (byte/short/int replicate forms) and one 128-bit rule covers all 16-byte
// vectors; the long form needs its own rule only because the all-ones
// constant is an immL_M1 rather than an immI_M1.

// not of any 8-byte vector (B/S/I element types)
instruct vnot2I(vecD dst, vecD src, immI_M1 m1)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src (ReplicateB m1)));
  match(Set dst (XorV src (ReplicateS m1)));
  match(Set dst (XorV src (ReplicateI m1)));
  ins_cost(INSN_COST);
  format %{ "not  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// not of any 16-byte vector (B/S/I element types)
instruct vnot4I(vecX dst, vecX src, immI_M1 m1)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src (ReplicateB m1)));
  match(Set dst (XorV src (ReplicateS m1)));
  match(Set dst (XorV src (ReplicateI m1)));
  ins_cost(INSN_COST);
  format %{ "not  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// not of a 16-byte vector of longs (ReplicateL of -1L)
instruct vnot2L(vecX dst, vecX src, immL_M1 m1)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src (ReplicateL m1)));
  ins_cost(INSN_COST);
  format %{ "not  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
20868 
20869 // ------------------------------ Vector max/min -------------------------------
20870 
20871 instruct vmax8B(vecD dst, vecD src1, vecD src2)
20872 %{
20873   predicate((n->as_Vector()->length() == 4 || n->as_Vector()->length() == 8) &&
20874              n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
20875   match(Set dst (MaxV src1 src2));
20876   ins_cost(INSN_COST);
20877   format %{ "maxv  $dst, $src1, $src2\t# vector (8B)" %}
20878   ins_encode %{
20879     __ maxv(as_FloatRegister($dst$$reg), __ T8B,
20880             as_FloatRegister($src1$$reg),
20881             as_FloatRegister($src2$$reg));
20882   %}
20883   ins_pipe(vdop64);
20884 %}
20885 
20886 instruct vmax16B(vecX dst, vecX src1, vecX src2)
20887 %{
20888   predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
20889   match(Set dst (MaxV src1 src2));
20890   ins_cost(INSN_COST);
20891   format %{ "maxv  $dst, $src1, $src2\t# vector (16B)" %}
20892   ins_encode %{
20893     __ maxv(as_FloatRegister($dst$$reg), __ T16B,
20894             as_FloatRegister($src1$$reg),
20895             as_FloatRegister($src2$$reg));
20896   %}
20897   ins_pipe(vdop128);
20898 %}
20899 
20900 instruct vmax4S(vecD dst, vecD src1, vecD src2)
20901 %{
20902   predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
20903   match(Set dst (MaxV src1 src2));
20904   ins_cost(INSN_COST);
20905   format %{ "maxv  $dst, $src1, $src2\t# vector (4S)" %}
20906   ins_encode %{
20907     __ maxv(as_FloatRegister($dst$$reg), __ T4H,
20908             as_FloatRegister($src1$$reg),
20909             as_FloatRegister($src2$$reg));
20910   %}
20911   ins_pipe(vdop64);
20912 %}
20913 
20914 instruct vmax8S(vecX dst, vecX src1, vecX src2)
20915 %{
20916   predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
20917   match(Set dst (MaxV src1 src2));
20918   ins_cost(INSN_COST);
20919   format %{ "maxv  $dst, $src1, $src2\t# vector (8S)" %}
20920   ins_encode %{
20921     __ maxv(as_FloatRegister($dst$$reg), __ T8H,
20922             as_FloatRegister($src1$$reg),
20923             as_FloatRegister($src2$$reg));
20924   %}
20925   ins_pipe(vdop128);
20926 %}
20927 
// vmax2I: lane-wise signed maximum of two 64-bit vectors of 2 ints.
// Matches MaxV(T_INT, 2 lanes); emits NEON SMAX on the T2S arrangement.
instruct vmax2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst, $src1, $src2\t# vector (2I)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20941 
// vmax4I: lane-wise signed maximum of two 128-bit vectors of 4 ints.
// Matches MaxV(T_INT, 4 lanes); emits NEON SMAX on the T4S arrangement.
instruct vmax4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst, $src1, $src2\t# vector (4I)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20955 
// vmin8B: lane-wise signed minimum of two 64-bit vectors of bytes (SMIN, T8B).
// The predicate also accepts 4-byte vectors: they share the 8B encoding and the
// unused upper lanes are simply ignored by consumers.
instruct vmin8B(vecD dst, vecD src1, vecD src2)
%{
  predicate((n->as_Vector()->length() == 4 || n->as_Vector()->length() == 8) &&
             n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst, $src1, $src2\t# vector (8B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20970 
// vmin16B: lane-wise signed minimum of two 128-bit vectors of 16 bytes.
// Matches MinV(T_BYTE, 16 lanes); emits NEON SMIN on the T16B arrangement.
instruct vmin16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst, $src1, $src2\t# vector (16B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
20984 
// vmin4S: lane-wise signed minimum of two 64-bit vectors of 4 shorts.
// Matches MinV(T_SHORT, 4 lanes); emits NEON SMIN on the T4H arrangement.
instruct vmin4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst, $src1, $src2\t# vector (4S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
20998 
// vmin8S: lane-wise signed minimum of two 128-bit vectors of 8 shorts.
// Matches MinV(T_SHORT, 8 lanes); emits NEON SMIN on the T8H arrangement.
instruct vmin8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst, $src1, $src2\t# vector (8S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
21012 
// vmin2I: lane-wise signed minimum of two 64-bit vectors of 2 ints.
// Matches MinV(T_INT, 2 lanes); emits NEON SMIN on the T2S arrangement.
instruct vmin2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst, $src1, $src2\t# vector (2I)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
21026 
// vmin4I: lane-wise signed minimum of two 128-bit vectors of 4 ints.
// Matches MinV(T_INT, 4 lanes); emits NEON SMIN on the T4S arrangement.
instruct vmin4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst, $src1, $src2\t# vector (4I)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
21040 
21041 
// vmax2L: lane-wise signed maximum of two 128-bit vectors of 2 longs.
// NEON has no 64-bit SMAX, so synthesize it: CMGT builds an all-ones/all-zeros
// mask per lane where src1 > src2, then BSL selects src1 bits where the mask is
// set and src2 bits elsewhere. TEMP dst keeps dst from sharing a register with
// the sources, since the mask overwrites dst before the sources are read by bsl.
instruct vmax2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP dst);
  format %{ "cmgt  $dst, $src1, $src2\t# vector (2L)\n\t"
            "bsl   $dst, $src1, $src2\t# vector (16B)" %}
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
21058 
// vmin2L: lane-wise signed minimum of two 128-bit vectors of 2 longs.
// Same CMGT+BSL trick as vmax2L, but with the BSL source operands swapped:
// where src1 > src2 the mask selects src2 (the smaller value), else src1.
instruct vmin2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP dst);
  format %{ "cmgt  $dst, $src1, $src2\t# vector (2L)\n\t"
            "bsl   $dst, $src2, $src1\t# vector (16B)" %}
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
21075 
21076 // --------------------------------- blend (bsl) ----------------------------
21077 
// vbsl8B: 64-bit VectorBlend. dst holds the selection mask on entry (note the
// match rule binds dst as the third VectorBlend input); BSL replaces it with
// (mask & src2) | (~mask & src1), i.e. src2 where mask bits are set, else src1.
instruct vbsl8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst, $src2, $src1\t# vector (8B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical64);
%}
21090 
// vbsl16B: 128-bit VectorBlend; same mask-in-dst BSL scheme as vbsl8B,
// operating on the full T16B arrangement.
instruct vbsl16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst, $src2, $src1\t# vector (16B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical128);
%}
21103 
21104 // --------------------------------- Load/store Mask ----------------------------
21105 
// loadmask8B: VectorLoadMask for 8 byte lanes. Input lanes are boolean 0/1;
// lane-wise negate turns 1 into -1 (0xFF), yielding the 0/all-ones mask form.
instruct loadmask8B(vecD dst, vecD src  )
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src ));
  ins_cost(INSN_COST);
  format %{ "negr  $dst, $src\t# load mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21117 
// loadmask16B: VectorLoadMask for 16 byte lanes; negate converts 0/1 booleans
// to 0/all-ones lane masks (see loadmask8B).
instruct loadmask16B(vecX dst, vecX src  )
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src ));
  ins_cost(INSN_COST);
  format %{ "negr  $dst, $src\t# load mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21129 
// storemask8B: VectorStoreMask for 8 one-byte lanes (immI_1 = element size 1).
// Negating the 0/-1 lane mask recovers boolean 0/1 bytes; no narrowing needed.
instruct storemask8B(vecD dst, vecD src , immI_1 size)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "negr  $dst, $src\t# store mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21141 
// storemask16B: VectorStoreMask for 16 one-byte lanes; negate maps 0/-1 lane
// masks back to boolean 0/1 bytes (see storemask8B).
instruct storemask16B(vecX dst, vecX src , immI_1 size)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "negr  $dst, $src\t# store mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21153 
// loadmask4S: VectorLoadMask to 4 short lanes. Widen the boolean bytes to
// halfwords with UXTL, then negate 0/1 to 0/all-ones halfword masks.
instruct loadmask4S(vecD dst, vecD src  )
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src ));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\n\t"
            "negr  $dst, $dst\t# load mask (4B to 4H)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21167 
// loadmask8S: VectorLoadMask to 8 short lanes (64-bit boolean source widened
// to a 128-bit result). UXTL widens bytes to halfwords, negate forms the mask.
instruct loadmask8S(vecX dst, vecD src  )
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src ));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\n\t"
            "negr  $dst, $dst\t# load mask (8B to 8H)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21181 
// storemask4S: VectorStoreMask from 4 short lanes (immI_2 = element size 2).
// XTN narrows halfword masks to bytes, then negate maps 0/-1 back to 0/1.
instruct storemask4S(vecD dst, vecD src , immI_2 size)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst, $src\n\t"
            "negr  $dst, $dst\t# store mask (4H to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21195 
// storemask8S: VectorStoreMask from 8 short lanes (128-bit source narrowed to
// a 64-bit boolean result). XTN narrows, negate converts 0/-1 to 0/1.
instruct storemask8S(vecD dst, vecX src , immI_2 size)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst, $src\n\t"
            "negr  $dst, $dst\t# store mask (8H to 8B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21209 
// loadmask2I: VectorLoadMask to 2 int (or float) lanes. Two UXTL steps widen
// boolean bytes through halfwords to words, then negate forms 0/all-ones masks.
instruct loadmask2I(vecD dst, vecD src  )
%{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src ));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 2B to 2H\n\t"
            "uxtl  $dst, $dst\t# 2H to 2S\n\t"
            "negr   $dst, $dst\t# load mask (2B to 2S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21227 
// loadmask4I: VectorLoadMask to 4 int (or float) lanes; widen bytes to words
// via two UXTL steps, then negate 0/1 to the 0/all-ones mask form.
instruct loadmask4I(vecX dst, vecD src  )
%{
  predicate(n->as_Vector()->length() == 4 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src ));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 4B to 4H\n\t"
            "uxtl  $dst, $dst\t# 4H to 4S\n\t"
            "negr   $dst, $dst\t# load mask (4B to 4S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21245 
// storemask2I: VectorStoreMask from 2 word lanes (immI_4 = element size 4).
// Two XTN steps narrow word masks to bytes, then negate maps 0/-1 back to 0/1.
instruct storemask2I(vecD dst, vecD src , immI_4 size)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst, $src\t# 2S to 2H\n\t"
            "xtn  $dst, $dst\t# 2H to 2B\n\t"
            "negr   $dst, $dst\t# store mask (2S to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21261 
// storemask4I: VectorStoreMask from 4 word lanes; narrow words to bytes via
// two XTN steps, then negate converts 0/-1 lane masks to boolean 0/1.
instruct storemask4I(vecD dst, vecX src , immI_4 size)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst, $src\t# 4S to 4H\n\t"
            "xtn  $dst, $dst\t# 4H to 4B\n\t"
            "negr   $dst, $dst\t# store mask (4S to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21277 
// loadmask2L: VectorLoadMask to 2 long (or double) lanes. Three UXTL steps
// widen boolean bytes through halfwords and words to doublewords; the final
// negate turns 0/1 into 0/all-ones doubleword masks.
instruct loadmask2L(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_LONG ||
             n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 2B to 2S\n\t"
            "uxtl  $dst, $dst\t# 2S to 2I\n\t"
            "uxtl  $dst, $dst\t# 2I to 2L\n\t"
            "neg   $dst, $dst\t# load mask (2B to 2L)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ uxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg), __ T2S);
    __ negr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21297 
// storemask2L: VectorStoreMask from 2 doubleword lanes (immI_8 = element size
// 8). Three XTN steps narrow doubleword masks down to bytes, then negate maps
// 0/-1 back to boolean 0/1.
instruct storemask2L(vecD dst, vecX src, immI_8 size)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (VectorStoreMask src size));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst, $src\t# 2L to 2I\n\t"
            "xtn  $dst, $dst\t# 2I to 2S\n\t"
            "xtn  $dst, $dst\t# 2S to 2B\n\t"
            "neg  $dst, $dst\t# store mask (2L to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21315 
21316 //-------------------------------- LOAD_IOTA_INDICES----------------------------------
21317 
// loadcon8B: VectorLoadConst for byte vectors of length 2/4/8. Loads the first
// 8 bytes of the shared iota-indices table ([0,1,2,...]) from the stub area;
// shorter vectors use the same 8-byte load and ignore the upper lanes.
// Clobbers rscratch1 as the address register.
instruct loadcon8B(vecD dst, immI0 src)
%{
  predicate((n->as_Vector()->length() == 2 || n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8) &&
             n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadConst src));
  ins_cost(INSN_COST);
  format %{ "ldr $dst, CONSTANT_MEMORY\t# load iota indices" %}
  ins_encode %{
    __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices()));
    __ ldrd(as_FloatRegister($dst$$reg), rscratch1);
  %}
  ins_pipe(pipe_class_memory);
%}
21332 
// loadcon16B: VectorLoadConst for 16-byte vectors; loads the full 16-byte iota
// indices table from the stub area. Clobbers rscratch1 as the address register.
instruct loadcon16B(vecX dst, immI0 src)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadConst src));
  ins_cost(INSN_COST);
  format %{ "ldr $dst, CONSTANT_MEMORY\t# load iota indices" %}
  ins_encode %{
    __ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices()));
    __ ldrq(as_FloatRegister($dst$$reg), rscratch1);
  %}
  ins_pipe(pipe_class_memory);
%}
21345 
21346 //-------------------------------- LOAD_SHUFFLE ----------------------------------
21347 
// loadshuffle8B: VectorLoadShuffle for 8 byte lanes. Byte shuffles are already
// in tbl-index form, so this is a plain register move (ORR of src with itself).
instruct loadshuffle8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "mov  $dst, $src\t# get 8B shuffle" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21361 
// loadshuffle16B: VectorLoadShuffle for 16 byte lanes; a register move
// expressed as ORR of src with itself (see loadshuffle8B).
instruct loadshuffle16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "mov  $dst, $src\t# get 16B shuffle" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
21375 
// loadshuffle4S: VectorLoadShuffle for 4 short lanes; widen the byte indices
// to halfwords with UXTL so rearrange4S can scale them into tbl byte indices.
instruct loadshuffle4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 4B to 4H" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}
21388 
// loadshuffle8S: VectorLoadShuffle for 8 short lanes; widen byte indices to
// halfwords with UXTL (consumed by rearrange8S).
instruct loadshuffle8S(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 8B to 8H" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
  %}
  ins_pipe(pipe_class_default);
%}
21401 
// loadshuffle4I: VectorLoadShuffle for 4 int (or float) lanes; two UXTL steps
// widen the byte indices to words (consumed by rearrange4I).
instruct loadshuffle4I(vecX dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadShuffle src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst, $src\t# 4B to 4H \n\t"
            "uxtl  $dst, $dst\t# 4H to 4S" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
  %}
  ins_pipe(pipe_slow);
%}
21417 
21418 //-------------------------------- Rearrange -------------------------------------
21419 // Here is an example that rearranges a NEON vector with 4 ints:
21420 // Rearrange V1 int[a0, a1, a2, a3] to V2 int[a2, a3, a0, a1]
21421 //   1. Get the indices of V1 and store them as Vi byte[0, 1, 2, 3].
21422 //   2. Convert Vi byte[0, 1, 2, 3] to the indices of V2 and also store them as Vi byte[2, 3, 0, 1].
21423 //   3. Unsigned extend Long Vi from byte[2, 3, 0, 1] to int[2, 3, 0, 1].
21424 //   4. Multiply Vi int[2, 3, 0, 1] with constant int[0x04040404, 0x04040404, 0x04040404, 0x04040404]
21425 //      and get tbl base Vm int[0x08080808, 0x0c0c0c0c, 0x00000000, 0x04040404].
21426 //   5. Add Vm with constant int[0x03020100, 0x03020100, 0x03020100, 0x03020100]
21427 //      and get tbl index Vm int[0x0b0a0908, 0x0f0e0d0c, 0x03020100, 0x07060504]
21428 //   6. Use Vm as index register, and use V1 as table register.
21429 //      Then get V2 as the result by tbl NEON instructions.
21430 // Notes:
21431 //   Step 1 matches VectorLoadConst.
21432 //   Step 3 matches VectorLoadShuffle.
21433 //   Step 4, 5, 6 match VectorRearrange.
21434 //   For VectorRearrange short/int, the reason why such complex calculation is
21435 //   required is because NEON tbl supports bytes table only, so for short/int, we
21436 //   need to lookup 2/4 bytes as a group. For VectorRearrange long, we use bsl
21437 //   to implement rearrange.
21438 
// rearrange8B: VectorRearrange for 8 byte lanes. Byte shuffles map directly to
// a single TBL lookup with src as the one-register table and shuffle as the
// index vector. TEMP_DEF dst keeps dst from aliasing the inputs.
instruct rearrange8B(vecD dst, vecD src, vecD shuffle)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "tbl $dst, {$dst}, $shuffle\t# rearrange 8B" %}
  ins_encode %{
    __ tbl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($shuffle$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21453 
// rearrange16B: VectorRearrange for 16 byte lanes; single TBL lookup with src
// as the table and shuffle as the byte-index vector (see rearrange8B).
instruct rearrange16B(vecX dst, vecX src, vecX shuffle)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "tbl $dst, {$dst}, $shuffle\t# rearrange 16B" %}
  ins_encode %{
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($shuffle$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21468 
// rearrange4S: VectorRearrange for 4 short lanes (see the algorithm comment
// above). TBL indexes bytes only, so each halfword lane index i is expanded to
// the byte pair {2i, 2i+1}: multiply the widened indices by the replicated
// constant 0x0202 and add the replicated base 0x0100 to form per-byte tbl
// indices, then do a single TBL lookup of src.
instruct rearrange4S(vecD dst, vecD src, vecD shuffle, vecD tmp0, vecD tmp1)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp0, TEMP tmp1);
  format %{ "mov   $tmp0, CONSTANT\t# constant 0x0202020202020202\n\t"
            "mov   $tmp1, CONSTANT\t# constant 0x0100010001000100\n\t"
            "mulv  $dst, T4H, $shuffle, $tmp0\n\t"
            "addv  $dst, T8B, $dst, $tmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 4S" %}
  ins_encode %{
    __ mov(as_FloatRegister($tmp0$$reg), __ T8B, 0x02);
    __ mov(as_FloatRegister($tmp1$$reg), __ T4H, 0x0100);
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($tmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($dst$$reg), as_FloatRegister($tmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21493 
// rearrange8S: VectorRearrange for 8 short lanes; 128-bit variant of the
// rearrange4S scheme. Scale widened halfword indices by 0x0202, add the
// per-lane byte offsets 0x0100, then TBL-lookup src with the byte indices.
instruct rearrange8S(vecX dst, vecX src, vecX shuffle, vecX tmp0, vecX tmp1)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp0, TEMP tmp1);
  format %{ "mov   $tmp0, CONSTANT\t# constant 0x0202020202020202\n\t"
            "mov   $tmp1, CONSTANT\t# constant 0x0100010001000100\n\t"
            "mulv  $dst, T8H, $shuffle, $tmp0\n\t"
            "addv  $dst, T16B, $dst, $tmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 8S" %}
  ins_encode %{
    __ mov(as_FloatRegister($tmp0$$reg), __ T16B, 0x02);
    __ mov(as_FloatRegister($tmp1$$reg), __ T8H, 0x0100);
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($tmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($dst$$reg), as_FloatRegister($tmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21518 
// rearrange4I: VectorRearrange for 4 int (or float) lanes (steps 4-6 of the
// algorithm comment above). Each word lane index i expands to the byte quad
// {4i..4i+3}: multiply widened indices by the replicated 0x04040404 and add
// the replicated base 0x03020100, then TBL-lookup src with the byte indices.
instruct rearrange4I(vecX dst, vecX src, vecX shuffle, vecX tmp0, vecX tmp1)
%{
  predicate(n->as_Vector()->length() == 4 &&
           (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
            n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorRearrange src shuffle));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp0, TEMP tmp1);
  format %{ "mov   $tmp0, CONSTANT\t# constant 0x0404040404040404\n\t"
            "mov   $tmp1, CONSTANT\t# constant 0x0302010003020100\n\t"
            "mulv  $dst, T8H, $shuffle, $tmp0\n\t"
            "addv  $dst, T16B, $dst, $tmp1\n\t"
            "tbl   $dst, {$src}, $dst\t# rearrange 4I" %}
  ins_encode %{
    __ mov(as_FloatRegister($tmp0$$reg), __ T16B, 0x04);
    __ mov(as_FloatRegister($tmp1$$reg), __ T4S, 0x03020100);
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($shuffle$$reg), as_FloatRegister($tmp0$$reg));
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($dst$$reg), as_FloatRegister($tmp1$$reg));
    __ tbl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), 1, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_slow);
%}
21544 
21545 //-------------------------------- Anytrue/alltrue -----------------------------
21546 
// anytrue_in_mask8B: VectorTest(ne) on a 64-bit mask vector — "is any lane
// set?". src1 and src2 are the same node (see format comment), so only src1 is
// read. ADDV sums all mask bytes into lane 0; the sum is non-zero iff at least
// one lane is set, and cset materializes the boolean.
instruct anytrue_in_mask8B(iRegINoSp dst, vecD src1, vecD src2, vecD tmp, rFlagsReg cr)
%{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::ne);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "addv  $tmp, T8B, $src1\t# src1 and src2 are the same\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst" %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src1$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::NE);
  %}
  ins_pipe(pipe_slow);
%}
21565 
// anytrue_in_mask16B: VectorTest(ne) on a 128-bit mask vector; same ADDV /
// UMOV / compare-and-cset sequence as anytrue_in_mask8B on T16B.
instruct anytrue_in_mask16B(iRegINoSp dst, vecX src1, vecX src2, vecX tmp, rFlagsReg cr)
%{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::ne);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "addv  $tmp, T16B, $src1\t# src1 and src2 are the same\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst" %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src1$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::NE);
  %}
  ins_pipe(pipe_slow);
%}
21584 
// alltrue_in_mask8B: VectorTest(overflow) on a 64-bit mask vector — "are all
// lanes set?". src2 is an all-true mask (see format comment): AND the input
// with it, invert, and sum the bytes with ADDV; the sum is zero iff every lane
// was set, so cset with EQ yields the boolean.
instruct alltrue_in_mask8B(iRegINoSp dst, vecD src1, vecD src2, vecD tmp, rFlagsReg cr)
%{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "andr  $tmp, T8B, $src1, $src2\t# src2 is maskAllTrue\n\t"
            "notr  $tmp, T8B, $tmp\n\t"
            "addv  $tmp, T8B, $tmp\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst" %}
  ins_encode %{
    __ andr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($tmp$$reg));
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
21608 
// alltrue_in_mask16B: VectorTest(overflow) on a 128-bit mask vector; same
// AND / NOT / ADDV / cset(EQ) sequence as alltrue_in_mask8B on T16B.
instruct alltrue_in_mask16B(iRegINoSp dst, vecX src1, vecX src2, vecX tmp, rFlagsReg cr)
%{
  predicate(static_cast<const VectorTestNode*>(n)->get_predicate() == BoolTest::overflow);
  match(Set dst (VectorTest src1 src2 ));
  ins_cost(INSN_COST);
  effect(TEMP tmp, KILL cr);
  format %{ "andr  $tmp, T16B, $src1, $src2\t# src2 is maskAllTrue\n\t"
            "notr  $tmp, T16B, $tmp\n\t"
            "addv  $tmp, T16B, $tmp\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "cmp   $dst, 0\n\t"
            "cset  $dst" %}
  ins_encode %{
    __ andr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($tmp$$reg));
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw($dst$$Register, zr);
    __ csetw($dst$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
21632 // END This section of the file is automatically generated. Do not edit --------------
21633 
21634 //----------PEEPHOLE RULES-----------------------------------------------------
21635 // These must follow all instruction definitions as they use the names
21636 // defined in the instructions definitions.
21637 //
21638 // peepmatch ( root_instr_name [preceding_instruction]* );
21639 //
21640 // peepconstraint %{
21641 // (instruction_number.operand_name relational_op instruction_number.operand_name
21642 //  [, ...] );
21643 // // instruction numbers are zero-based using left to right order in peepmatch
21644 //
21645 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
21646 // // provide an instruction_number.operand_name for each operand that appears
21647 // // in the replacement instruction's match rule
21648 //
21649 // ---------VM FLAGS---------------------------------------------------------
21650 //
21651 // All peephole optimizations can be turned off using -XX:-OptoPeephole
21652 //
21653 // Each peephole rule is given an identifying number starting with zero and
21654 // increasing by one in the order seen by the parser.  An individual peephole
21655 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
21656 // on the command-line.
21657 //
21658 // ---------CURRENT LIMITATIONS----------------------------------------------
21659 //
21660 // Only match adjacent instructions in same basic block
21661 // Only equality constraints
21662 // Only constraints between operands, not (0.dest_reg == RAX_enc)
21663 // Only one replacement instruction
21664 //
21665 // ---------EXAMPLE----------------------------------------------------------
21666 //
21667 // // pertinent parts of existing instructions in architecture description
21668 // instruct movI(iRegINoSp dst, iRegI src)
21669 // %{
21670 //   match(Set dst (CopyI src));
21671 // %}
21672 //
21673 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
21674 // %{
21675 //   match(Set dst (AddI dst src));
21676 //   effect(KILL cr);
21677 // %}
21678 //
21679 // // Change (inc mov) to lea
21680 // peephole %{
// 21681 //   // increment preceded by register-register move
21682 //   peepmatch ( incI_iReg movI );
21683 //   // require that the destination register of the increment
21684 //   // match the destination register of the move
21685 //   peepconstraint ( 0.dst == 1.dst );
21686 //   // construct a replacement instruction that sets
21687 //   // the destination to ( move's source register + one )
21688 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
21689 // %}
21690 //
21691 
21692 // Implementation no longer uses movX instructions since
21693 // machine-independent system no longer uses CopyX nodes.
21694 //
21695 // peephole
21696 // %{
21697 //   peepmatch (incI_iReg movI);
21698 //   peepconstraint (0.dst == 1.dst);
21699 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21700 // %}
21701 
21702 // peephole
21703 // %{
21704 //   peepmatch (decI_iReg movI);
21705 //   peepconstraint (0.dst == 1.dst);
21706 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21707 // %}
21708 
21709 // peephole
21710 // %{
21711 //   peepmatch (addI_iReg_imm movI);
21712 //   peepconstraint (0.dst == 1.dst);
21713 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
21714 // %}
21715 
21716 // peephole
21717 // %{
21718 //   peepmatch (incL_iReg movL);
21719 //   peepconstraint (0.dst == 1.dst);
21720 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21721 // %}
21722 
21723 // peephole
21724 // %{
21725 //   peepmatch (decL_iReg movL);
21726 //   peepconstraint (0.dst == 1.dst);
21727 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21728 // %}
21729 
21730 // peephole
21731 // %{
21732 //   peepmatch (addL_iReg_imm movL);
21733 //   peepconstraint (0.dst == 1.dst);
21734 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
21735 // %}
21736 
21737 // peephole
21738 // %{
21739 //   peepmatch (addP_iReg_imm movP);
21740 //   peepconstraint (0.dst == 1.dst);
21741 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
21742 // %}
21743 
21744 // // Change load of spilled value to only a spill
21745 // instruct storeI(memory mem, iRegI src)
21746 // %{
21747 //   match(Set mem (StoreI mem src));
21748 // %}
21749 //
21750 // instruct loadI(iRegINoSp dst, memory mem)
21751 // %{
21752 //   match(Set dst (LoadI mem));
21753 // %}
21754 //
21755 
21756 //----------SMARTSPILL RULES---------------------------------------------------
21757 // These must follow all instruction definitions as they use the names
21758 // defined in the instructions definitions.
21759 
21760 // Local Variables:
21761 // mode: c++
21762 // End: