1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always treated as save-on-call
// (whereas the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as an
// instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// Uses the same V<n>/V<n>_H slot pairs as double_reg: a 64-bit
// vector occupies two 32-bit allocator slots.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// A 128-bit vector occupies four 32-bit allocator slots:
// V<n>, V<n>_H, V<n>_J and V<n>_K.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V<n>/V<n>_H slot pair is listed for these
// "128 bit" classes, matching the double_reg layout rather than the
// four-slot vectorx_reg layout -- confirm this is intentional.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are twice as expensive as a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references (which imply memory barriers) are an order
  // of magnitude more expensive than plain register operations.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------
  // Both queries report zero: this port emits no call trampoline
  // stubs, so shorten_branches reserves no space for them.

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
class HandlerImpl {

 public:

  // emitters for the exception and deoptimization handler stubs
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
 // classify an ideal opcode as a CAS/atomic macro node (see the
 // definition in the source block below for details)
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
1064   // Optimizaton of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
1262   // is_CAS(int opcode, bool maybe_volatile)
1263   //
1264   // return true if opcode is one of the possible CompareAndSwapX
1265   // values otherwise false.
1266 
1267   bool is_CAS(int opcode, bool maybe_volatile)
1268   {
1269     switch(opcode) {
1270       // We handle these
1271     case Op_CompareAndSwapI:
1272     case Op_CompareAndSwapL:
1273     case Op_CompareAndSwapP:
1274     case Op_CompareAndSwapN:
1275     case Op_ShenandoahCompareAndSwapP:
1276     case Op_ShenandoahCompareAndSwapN:
1277     case Op_CompareAndSwapB:
1278     case Op_CompareAndSwapS:
1279     case Op_GetAndSetI:
1280     case Op_GetAndSetL:
1281     case Op_GetAndSetP:
1282     case Op_GetAndSetN:
1283     case Op_GetAndAddI:
1284     case Op_GetAndAddL:
1285       return true;
1286     case Op_CompareAndExchangeI:
1287     case Op_CompareAndExchangeN:
1288     case Op_CompareAndExchangeB:
1289     case Op_CompareAndExchangeS:
1290     case Op_CompareAndExchangeL:
1291     case Op_CompareAndExchangeP:
1292     case Op_WeakCompareAndSwapB:
1293     case Op_WeakCompareAndSwapS:
1294     case Op_WeakCompareAndSwapI:
1295     case Op_WeakCompareAndSwapL:
1296     case Op_WeakCompareAndSwapP:
1297     case Op_WeakCompareAndSwapN:
1298     case Op_ShenandoahWeakCompareAndSwapP:
1299     case Op_ShenandoahWeakCompareAndSwapN:
1300     case Op_ShenandoahCompareAndExchangeP:
1301     case Op_ShenandoahCompareAndExchangeN:
1302       return maybe_volatile;
1303     default:
1304       return false;
1305     }
1306   }
1307 
1308   // helper to determine the maximum number of Phi nodes we may need to
1309   // traverse when searching from a card mark membar for the merge mem
1310   // feeding a trailing membar or vice versa
1311 
1312 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1313 
1314 bool unnecessary_acquire(const Node *barrier)
1315 {
1316   assert(barrier->is_MemBar(), "expecting a membar");
1317 
1318   if (UseBarriersForVolatile) {
1319     // we need to plant a dmb
1320     return false;
1321   }
1322 
1323   MemBarNode* mb = barrier->as_MemBar();
1324 
1325   if (mb->trailing_load()) {
1326     return true;
1327   }
1328 
1329   if (mb->trailing_load_store()) {
1330     Node* load_store = mb->in(MemBarNode::Precedent);
1331     assert(load_store->is_LoadStore(), "unexpected graph shape");
1332     return is_CAS(load_store->Opcode(), true);
1333   }
1334 
1335   return false;
1336 }
1337 
1338 bool needs_acquiring_load(const Node *n)
1339 {
1340   assert(n->is_Load(), "expecting a load");
1341   if (UseBarriersForVolatile) {
1342     // we use a normal load and a dmb
1343     return false;
1344   }
1345 
1346   LoadNode *ld = n->as_Load();
1347 
1348   return ld->is_acquire();
1349 }
1350 
// Return true when a leading MemBarRelease can be elided because the
// matcher will emit a releasing store (stlr<x>) or an acquiring CAS
// sequence for the access it brackets.
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *barrier = n->as_MemBar();
  if (!barrier->leading()) {
    // not part of a recognized leading/trailing signature
    return false;
  } else {
    // walk to the matching trailing membar and classify the access
    // the pair brackets
    Node* trailing = barrier->trailing_membar();
    MemBarNode* trailing_mb = trailing->as_MemBar();
    assert(trailing_mb->trailing(), "Not a trailing membar?");
    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");

    Node* mem = trailing_mb->in(MemBarNode::Precedent);
    if (mem->is_Store()) {
      // volatile store signature: the store will be emitted as stlr
      assert(mem->as_Store()->is_release(), "");
      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
      return true;
    } else {
      // CAS signature: elide only for opcodes translated with ldaxr
      assert(mem->is_LoadStore(), "");
      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
      return is_CAS(mem->Opcode(), true);
    }
  }
  return false; // not reached -- both branches above return
}
1384 
// Return true when a trailing MemBarVolatile can be elided because
// the preceding store will be emitted as a releasing stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // the membar is redundant exactly when it is the trailing membar
  // of a volatile store signature
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  if (release) {
    // cross-check that the leading membar agrees with this
    // classification
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1408 
1409 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1410 
1411 bool needs_releasing_store(const Node *n)
1412 {
1413   // assert n->is_Store();
1414   if (UseBarriersForVolatile) {
1415     // we use a normal store and dmb combination
1416     return false;
1417   }
1418 
1419   StoreNode *st = n->as_Store();
1420 
1421   return st->trailing_membar() != NULL;
1422 }
1423 
1424 // predicate controlling translation of CAS
1425 //
1426 // returns true if CAS needs to use an acquiring load otherwise false
1427 
1428 bool needs_acquiring_load_exclusive(const Node *n)
1429 {
1430   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1431   if (UseBarriersForVolatile) {
1432     return false;
1433   }
1434 
1435   LoadStoreNode* ldst = n->as_LoadStore();
1436   if (is_CAS(n->Opcode(), false)) {
1437     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1438   } else {
1439     return ldst->trailing_membar() != NULL;
1440   }
1441 
1442   // so we can just return true here
1443   return true;
1444 }
1445 
1446 // predicate controlling translation of StoreCM
1447 //
1448 // returns true if a StoreStore must precede the card write otherwise
1449 // false
1450 
1451 bool unnecessary_storestore(const Node *storecm)
1452 {
1453   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1454 
1455   // we need to generate a dmb ishst between an object put and the
1456   // associated card mark when we are using CMS without conditional
1457   // card marking
1458 
1459   if (UseConcMarkSweepGC && !UseCondCardMark) {
1460     return false;
1461   }
1462 
1463   // a storestore is unnecesary in all other cases
1464 
1465   return true;
1466 }
1467 
1468 
1469 #define __ _masm.
1470 
1471 // advance declarations for helper functions to convert register
1472 // indices to register objects
1473 
1474 // the ad file has to provide implementations of certain methods
1475 // expected by the generic code
1476 //
1477 // REQUIRED FUNCTIONALITY
1478 
1479 //=============================================================================
1480 
1481 // !!!!! Special hack to get all types of calls to specify the byte offset
1482 //       from the start of the call to the point where the return address
1483 //       will point.
1484 
1485 int MachCallStaticJavaNode::ret_addr_offset()
1486 {
1487   // call should be a simple bl
1488   int off = 4;
1489   return off;
1490 }
1491 
// A dynamic Java call is a four instruction sequence, so the return
// address lies 16 bytes past the start of the call.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
1496 
1497 int MachCallRuntimeNode::ret_addr_offset() {
1498   // for generated stubs the call will be
1499   //   far_call(addr)
1500   // for real runtime callouts it will be six instructions
1501   // see aarch64_enc_java_to_runtime
1502   //   adr(rscratch2, retaddr)
1503   //   lea(rscratch1, RuntimeAddress(addr)
1504   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1505   //   blrt rscratch1
1506   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1507   if (cb) {
1508     return MacroAssembler::far_branch_size();
1509   } else {
1510     return 6 * NativeInstruction::instruction_size;
1511   }
1512 }
1513 
1514 // Indicate if the safepoint node needs the polling page as an input
1515 
1516 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1518 // instruction itself. so we cannot plant a mov of the safepoint poll
1519 // address followed by a load. setting this to true means the mov is
1520 // scheduled as a prior instruction. that's better for scheduling
1521 // anyway.
1522 
bool SafePointNode::needs_polling_address_input()
{
  // the poll address is an explicit input so the mov that
  // materializes it is scheduled ahead of the load (see the comment
  // above)
  return true;
}
1527 
1528 //=============================================================================
1529 
#ifndef PRODUCT
// Print a textual placeholder for the breakpoint in debug output.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// Emit a brk #0 instruction, trapping to the debugger.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // let the generic code compute the size from the emitted bytes
  return MachNode::size(ra_);
}
1544 
1545 //=============================================================================
1546 
#ifndef PRODUCT
  // Describe the nop padding in debug output.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions as padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // The pad is exactly _count 4-byte instructions.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1563 
1564 //=============================================================================
// The constant table base produces no value in a register on this
// platform (constants are addressed absolutely), so the node has an
// empty output mask, an empty encoding and zero size.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-register-allocation expansion is needed; reaching
// postalloc_expand would be a bug.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1589 
#ifndef PRODUCT
// Print the pseudo-assembly of the prolog for debug output.  Two
// shapes are shown: a direct immediate sp adjustment when the frame
// size fits a 9-bit offset, and a scratch-register based adjustment
// otherwise.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1611 
// Emit the method prolog: a patchable leading nop, optional stack
// bang, frame construction, simulator notification and constant
// table base setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1647 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// the prolog contains no relocatable constants
int MachPrologNode::reloc() const
{
  return 0;
}
1658 
1659 //=============================================================================
1660 
#ifndef PRODUCT
// Print the pseudo-assembly of the epilog: the frame pop (one of
// three shapes depending on frame size) followed, for method
// compilations, by the return safepoint poll.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1686 
// Emit the method epilog: remove the frame, notify the simulator of
// reentry, perform the reserved stack check and touch the polling
// page for the return safepoint.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // return-type safepoint poll
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1706 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// the epilog uses the default pipeline class
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1728 
1729 //=============================================================================
1730 
1731 // Figure out which register class each belongs in: rc_int, rc_float or
1732 // rc_stack.
1733 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1734 
1735 static enum RC rc_class(OptoReg::Name reg) {
1736 
1737   if (reg == OptoReg::Bad) {
1738     return rc_bad;
1739   }
1740 
1741   // we have 30 int registers * 2 halves
1742   // (rscratch1 and rscratch2 are omitted)
1743 
1744   if (reg < 60) {
1745     return rc_int;
1746   }
1747 
1748   // we have 32 float register * 2 halves
1749   if (reg < 60 + 128) {
1750     return rc_float;
1751   }
1752 
1753   // Between float regs & stack is the flags regs.
1754   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1755 
1756   return rc_stack;
1757 }
1758 
1759 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1760   Compile* C = ra_->C;
1761 
1762   // Get registers to move.
1763   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1764   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1765   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1766   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1767 
1768   enum RC src_hi_rc = rc_class(src_hi);
1769   enum RC src_lo_rc = rc_class(src_lo);
1770   enum RC dst_hi_rc = rc_class(dst_hi);
1771   enum RC dst_lo_rc = rc_class(dst_lo);
1772 
1773   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1774 
1775   if (src_hi != OptoReg::Bad) {
1776     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1777            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1778            "expected aligned-adjacent pairs");
1779   }
1780 
1781   if (src_lo == dst_lo && src_hi == dst_hi) {
1782     return 0;            // Self copy, no move.
1783   }
1784 
1785   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1786               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1787   int src_offset = ra_->reg2offset(src_lo);
1788   int dst_offset = ra_->reg2offset(dst_lo);
1789 
1790   if (bottom_type()->isa_vect() != NULL) {
1791     uint ireg = ideal_reg();
1792     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1793     if (cbuf) {
1794       MacroAssembler _masm(cbuf);
1795       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1796       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1797         // stack->stack
1798         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1799         if (ireg == Op_VecD) {
1800           __ unspill(rscratch1, true, src_offset);
1801           __ spill(rscratch1, true, dst_offset);
1802         } else {
1803           __ spill_copy128(src_offset, dst_offset);
1804         }
1805       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1806         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1807                ireg == Op_VecD ? __ T8B : __ T16B,
1808                as_FloatRegister(Matcher::_regEncode[src_lo]));
1809       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1810         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1811                        ireg == Op_VecD ? __ D : __ Q,
1812                        ra_->reg2offset(dst_lo));
1813       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1814         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1815                        ireg == Op_VecD ? __ D : __ Q,
1816                        ra_->reg2offset(src_lo));
1817       } else {
1818         ShouldNotReachHere();
1819       }
1820     }
1821   } else if (cbuf) {
1822     MacroAssembler _masm(cbuf);
1823     switch (src_lo_rc) {
1824     case rc_int:
1825       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1826         if (is64) {
1827             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1828                    as_Register(Matcher::_regEncode[src_lo]));
1829         } else {
1830             MacroAssembler _masm(cbuf);
1831             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1832                     as_Register(Matcher::_regEncode[src_lo]));
1833         }
1834       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1835         if (is64) {
1836             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1837                      as_Register(Matcher::_regEncode[src_lo]));
1838         } else {
1839             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1840                      as_Register(Matcher::_regEncode[src_lo]));
1841         }
1842       } else {                    // gpr --> stack spill
1843         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1844         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1845       }
1846       break;
1847     case rc_float:
1848       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1849         if (is64) {
1850             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1851                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1852         } else {
1853             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1854                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1855         }
1856       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1857           if (cbuf) {
1858             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1859                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1860         } else {
1861             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1862                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1863         }
1864       } else {                    // fpr --> stack spill
1865         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1866         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1867                  is64 ? __ D : __ S, dst_offset);
1868       }
1869       break;
1870     case rc_stack:
1871       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1872         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1873       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1874         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1875                    is64 ? __ D : __ S, src_offset);
1876       } else {                    // stack --> stack copy
1877         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1878         __ unspill(rscratch1, is64, src_offset);
1879         __ spill(rscratch1, is64, dst_offset);
1880       }
1881       break;
1882     default:
1883       assert(false, "bad rc_class for spill");
1884       ShouldNotReachHere();
1885     }
1886   }
1887 
1888   if (st) {
1889     st->print("spill ");
1890     if (src_lo_rc == rc_stack) {
1891       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1892     } else {
1893       st->print("%s -> ", Matcher::regName[src_lo]);
1894     }
1895     if (dst_lo_rc == rc_stack) {
1896       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1897     } else {
1898       st->print("%s", Matcher::regName[dst_lo]);
1899     }
1900     if (bottom_type()->isa_vect() != NULL) {
1901       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1902     } else {
1903       st->print("\t# spill size = %d", is64 ? 64:32);
1904     }
1905   }
1906 
1907   return 0;
1908 
1909 }
1910 
#ifndef PRODUCT
// Debug printing: before register allocation, print a symbolic node
// dump; afterwards, print the resolved spill description produced by
// implementation() (with cbuf == NULL so nothing is emitted).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif
1919 
// Emit the spill-copy instructions into the code buffer (no text output).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1923 
// Code size of a spill copy, measured generically from the emitted code.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1927 
1928 //=============================================================================
1929 
1930 #ifndef PRODUCT
1931 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1932   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1933   int reg = ra_->get_reg_first(this);
1934   st->print("add %s, rsp, #%d]\t# box lock",
1935             Matcher::regName[reg], offset);
1936 }
1937 #endif
1938 
// Materialize the address of the box lock's stack slot into the
// allocated register: add <reg>, sp, #offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // Frame offsets are expected to always fit an add/sub immediate;
    // size() below assumes a single instruction.
    ShouldNotReachHere();
  }
}
1951 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() above always produces exactly one 4-byte add instruction.
  return 4;
}
1956 
1957 //=============================================================================
1958 
#ifndef PRODUCT
// Pretty-print the unverified entry point; the text mirrors the
// instruction sequence produced by MachUEPNode::emit below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
1975 
// Unverified entry point: verify that the receiver's klass matches the
// inline cache before falling through to the verified entry; otherwise
// jump to the ic-miss stub.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the receiver's (j_rarg0's) klass against the IC klass;
  // sets the condition flags (argument roles per MacroAssembler::cmp_klass).
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1989 
// Code size of the UEP sequence, measured generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1994 
1995 // REQUIRED EMIT CODE
1996 
1997 //=============================================================================
1998 
// Emit exception handler code.
// Returns the handler's offset within the stub section, or 0 on failure
// (code cache full).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // Out of stub space: record the bailout so the compile is abandoned.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // Far jump to the shared C2 exception blob.
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2018 
// Emit deopt handler code.
// Returns the handler's offset within the stub section, or 0 on failure
// (code cache full).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Load lr with the address of this handler entry before jumping to
  // the deopt blob's unpack entry.
  // NOTE(review): confirm the exact lr contract against
  // SharedRuntime::deopt_blob()->unpack().
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2039 
2040 // REQUIRED MATCHER CODE
2041 
2042 //=============================================================================
2043 
2044 const bool Matcher::match_rule_supported(int opcode) {
2045 
2046   switch (opcode) {
2047   default:
2048     break;
2049   }
2050 
2051   if (!has_match_rule(opcode)) {
2052     return false;
2053   }
2054 
2055   return true;  // Per default match rules are supported.
2056 }
2057 
2058 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt, int op_arity) {
2059   int bit_size = vlen * type2aelembytes(bt) * 8;
2060   if (bit_size > 128) {
2061     return false;
2062   }
2063   // identify extra cases that we might want to provide match rules for
2064   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2065   bool ret_value = match_rule_supported(opcode) && vector_size_supported(bt, vlen);
2066   // Add rules here.
2067 
2068   return ret_value;  // Per default match rules are supported.
2069 }
2070 
// Predicated (masked) vector operations are not supported.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention() {
  return false;
}

// Delegate to the shared runtime (unused while the convention above is
// disabled).
void Matcher::vector_calling_convention(VMRegPair *regs, uint num_bits, uint total_args_passed) {
  (void) SharedRuntime::vector_calling_convention(regs, num_bits, total_args_passed);
}

// No vector return convention; must not be reached.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}

// No platform adjustment of the float register pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on this platform (no x87-style FPU stack).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2098 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Short branches reach a signed 16-bit byte range.
  return (-32768 <= offset && offset < 32768);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2119 
2120 // Vector width in bytes.
2121 const int Matcher::vector_width_in_bytes(BasicType bt) {
2122   int size = MIN2(UseSVE ? 256: 16, (int)MaxVectorSize);
2123   // Minimum 2 values in vector
2124   if (size < 2*type2aelembytes(bt)) size = 0;
2125   // But never < 4
2126   if (size < 4) size = 0;
2127   return size;
2128 }
2129 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum element count, bounded above by the maximum.
const int Matcher::min_vector_size(const BasicType bt) {
  int max_size = max_vector_size(bt);
  // To support vector load mask for long data,  set min size
  // which can be loaded into vector as 2 bytes.
  int size = 2;
  return MIN2(size,max_size);
}
2141 
2142 // Vector ideal reg.
2143 const uint Matcher::vector_ideal_reg(int len) {
2144   switch(len) {
2145     // For 16-bit/32-bit mask vector, reuse VecD.
2146     case  2:
2147     case  4:
2148     case  8: return Op_VecD;
2149     case 16: return Op_VecX;
2150   }
2151   ShouldNotReachHere();
2152   return 0;
2153 }
2154 
2155 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2156   switch(size) {
2157     case  4:
2158     case  8: return Op_VecD;
2159     case 16: return Op_VecX;
2160   }
2161   ShouldNotReachHere();
2162   return 0;
2163 }
2164 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Misaligned vector loads/stores are allowed on this platform unless
// the -XX:+AlignVector flag forces alignment.  (Previous comment said
// "x86"; this is the AArch64 file.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2174 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low bits of the count.
const bool Matcher::need_masked_shift_count = false;
2195 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable when no shift is needed to decode.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}

// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Should never be called on AArch64 (comment inherited from the amd64
// port, where this is a no-op).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2257 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Both halves of r0-r7 (integer args) and v0-v7 (float args).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java argument register may be used for spilling.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2288 
// No hand-written assembly for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
// AArch64 has no combined div/mod instruction, so none of the four
// projection masks below should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across a method-handle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2319 
2320 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2321   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2322     Node* u = addp->fast_out(i);
2323     if (u->is_Mem()) {
2324       int opsize = u->as_Mem()->memory_size();
2325       assert(opsize > 0, "unexpected memory operand size");
2326       if (u->as_Mem()->memory_size() != (1<<shift)) {
2327         return false;
2328       }
2329     }
2330   }
2331   return true;
2332 }
2333 
const bool Matcher::convi2l_type_required = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Returns true when the AddP was absorbed into an addressing mode and
// its inputs have been pushed for matching; false to match the AddP
// normally.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL (ConvI2L x) con) and the shift amount
  // matches every memory user's operand size.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // Absorb an inner ConvI2L as well when it has no other uses.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) — sign-extended index.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2376 
// No platform-specific address-expression reshaping is needed.
void Compile::reshape_address(AddPNode* addp) {
}
2379 
2380 // helper for encoding java_to_runtime calls on sim
2381 //
2382 // this is needed to compute the extra arguments required when
2383 // planting a call to the simulator blrt instruction. the TypeFunc
2384 // can be queried to identify the counts for integral, and floating
2385 // arguments and the return type
2386 
2387 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
2388 {
2389   int gps = 0;
2390   int fps = 0;
2391   const TypeTuple *domain = tf->domain();
2392   int max = domain->cnt();
2393   for (int i = TypeFunc::Parms; i < max; i++) {
2394     const Type *t = domain->field_at(i);
2395     switch(t->basic_type()) {
2396     case T_FLOAT:
2397     case T_DOUBLE:
2398       fps++;
2399     default:
2400       gps++;
2401     }
2402   }
2403   gpcnt = gps;
2404   fpcnt = fps;
2405   BasicType rt = tf->return_type();
2406   switch (rt) {
2407   case T_VOID:
2408     rtype = MacroAssembler::ret_type_void;
2409     break;
2410   default:
2411     rtype = MacroAssembler::ret_type_integral;
2412     break;
2413   case T_FLOAT:
2414     rtype = MacroAssembler::ret_type_float;
2415     break;
2416   case T_DOUBLE:
2417     rtype = MacroAssembler::ret_type_double;
2418     break;
2419   }
2420 }
2421 
// Emit a volatile load/store via INSN on a base-register-only address.
// Volatile accesses allow no index, scale or displacement; the
// guarantees below enforce that the matched addressing mode is bare.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2430 
// Pointer-to-member types for MacroAssembler load/store emitters,
// used as callbacks by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2435 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Emits `insn reg, [base]`, `insn reg, [base, #disp]` or
  // `insn reg, [base, index, extend]` depending on the matched operand.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index came from an int: sign-extend (sxtw) while scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: base + displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2466 
  // Float-register variant of loadStore() above; same addressing-mode
  // selection based on the matched memory operand's opcode.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Index came from an int: sign-extend (sxtw) while scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2489 
  // Vector variant of loadStore(): takes the SIMD register-width
  // variant T; only lsl scaling is supported for vector accesses.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2501 
2502 %}
2503 
2504 
2505 
2506 //----------ENCODING BLOCK-----------------------------------------------------
2507 // This block specifies the encoding classes used by the compiler to
2508 // output byte streams.  Encoding classes are parameterized macros
2509 // used by Machine Instruction Nodes in order to generate the bit
2510 // encoding of the instruction.  Operands specify their base encoding
2511 // interface with the interface keyword.  There are currently
2512 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2513 // COND_INTER.  REG_INTER causes an operand to generate a function
2514 // which returns its register number when queried.  CONST_INTER causes
2515 // an operand to generate a function which returns the value of the
2516 // constant when queried.  MEMORY_INTER causes an operand to generate
2517 // four functions which return the Base Register, the Index Register,
2518 // the Scale Value, and the Offset Value of the operand when queried.
2519 // COND_INTER causes an operand to generate six functions which return
2520 // the encoding code (ie - encoding bits for the instruction)
2521 // associated with each basic boolean condition for a conditional
2522 // instruction.
2523 //
2524 // Instructions specify two basic values for encoding.  Again, a
2525 // function is available to check if the constant displacement is an
2526 // oop. They use the ins_encode keyword to specify their encoding
2527 // classes (which must be a sequence of enc_class names, and their
2528 // parameters, specified in the encoding block), and they use the
2529 // opcode keyword to specify, in order, their primary, secondary, and
2530 // tertiary opcode.  Only the opcode sections which a particular
2531 // instruction needs for encoding need to be specified.
2532 encode %{
2533   // Build emit functions for each basic byte or larger field in the
2534   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2535   // from C++ code in the enc_class source block.  Emit functions will
2536   // live in the main source block for now.  In future, we can
2537   // generalize this by adding a syntax that specifies the sizes of
2538   // fields in an order, so that the adlc can build the emit functions
2539   // automagically
2540 
  // catch all for unimplemented encodings
  // Emits a call to MacroAssembler::unimplemented(), which stops the VM.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2546 
  // BEGIN Non-volatile memory access
  //
  // Each encoding below forwards to the appropriate loadStore() helper
  // with the matching MacroAssembler load instruction.

  // Load byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, zero-extended (int destination).
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, zero-extended (long destination).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, zero-extended (int destination).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, zero-extended (long destination).
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word (int destination).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, zero-extended (long destination).
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, sign-extended to 64 bits.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit doubleword.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit float into an FP/SIMD register.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit double into an FP/SIMD register.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2632 
  // Vector load encodings: SIMD-register ldr with an explicit access size
  // (H = 2 bytes, S = 4 bytes, D = 8 bytes, Q = 16 bytes).
  enc_class aarch64_enc_ldrvH(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 128-bit (Q) vector load; note the operand is vecX, not vecD.
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2656 
  // Scalar store encodings.  The *0 variants store the zero register (zr)
  // directly, avoiding the need to materialize a zero constant.

  // Store low byte of a 32-bit register (strb).
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero byte.
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero byte, preceded by a StoreStore barrier so prior stores
  // become visible before this one.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store low halfword of a 32-bit register (strh).
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero halfword.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit word (strw).
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero word.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2699 
  // Store 64-bit doubleword (str).  The stack pointer cannot be used
  // directly as a store source on AArch64, so it is first copied into
  // rscratch2 (only expected when storing sp into the current thread).
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2713 
  // Store a zero doubleword.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store single-precision float (strs).
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store double-precision float (strd).
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector store encodings: SIMD-register str with an explicit access size
  // (H = 2 bytes, S = 4 bytes, D = 8 bytes, Q = 16 bytes).
  enc_class aarch64_enc_strvH(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 128-bit (Q) vector store; note the operand is vecX, not vecD.
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2755 
2756   // END Non-volatile memory access
2757 
2758   // volatile loads and stores
2759 
  // Store-release encodings (stlrb/stlrh/stlrw).  MOV_VOLATILE is a macro
  // defined elsewhere in this file; rscratch1 is passed as a scratch
  // register -- presumably for forming the effective address, since the
  // release-store instructions only accept a bare base register
  // (NOTE(review): macro body not visible here -- confirm).
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
2774 
2775 
  // Load-acquire encodings.  AArch64 has no signed load-acquire forms, so
  // the signed variants perform an unsigned ldarb/ldarh and then
  // sign-extend the result in place.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire unsigned byte (32-bit destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire unsigned byte (64-bit destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire unsigned halfword (32-bit destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire unsigned halfword (64-bit destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire 32-bit word.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 32-bit word into a 64-bit register.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Load-acquire a float: acquire-load the bits into rscratch1, then move
  // them into the FP register with fmov.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Load-acquire a double via rscratch1, as above.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2850 
2851   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2852     Register src_reg = as_Register($src$$reg);
2853     // we sometimes get asked to store the stack pointer into the
2854     // current thread -- we cannot do that directly on AArch64
2855     if (src_reg == r31_sp) {
2856         MacroAssembler _masm(&cbuf);
2857       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2858       __ mov(rscratch2, sp);
2859       src_reg = rscratch2;
2860     }
2861     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2862                  rscratch1, stlr);
2863   %}
2864 
  // Store-release a float: move the FP bits into rscratch2 first, then
  // release-store from the integer register.  The fmov is wrapped in its
  // own scope so this _masm does not clash with any assembler object the
  // MOV_VOLATILE macro declares (NOTE(review): presumed; macro body not
  // visible here).
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Store-release a double via rscratch2, as above.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2884 
2885   // synchronized read/update encodings
2886 
2887   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2888     MacroAssembler _masm(&cbuf);
2889     Register dst_reg = as_Register($dst$$reg);
2890     Register base = as_Register($mem$$base);
2891     int index = $mem$$index;
2892     int scale = $mem$$scale;
2893     int disp = $mem$$disp;
2894     if (index == -1) {
2895        if (disp != 0) {
2896         __ lea(rscratch1, Address(base, disp));
2897         __ ldaxr(dst_reg, rscratch1);
2898       } else {
2899         // TODO
2900         // should we ever get anything other than this case?
2901         __ ldaxr(dst_reg, base);
2902       }
2903     } else {
2904       Register index_reg = as_Register(index);
2905       if (disp == 0) {
2906         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2907         __ ldaxr(dst_reg, rscratch1);
2908       } else {
2909         __ lea(rscratch1, Address(base, disp));
2910         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2911         __ ldaxr(dst_reg, rscratch1);
2912       }
2913     }
2914   %}
2915 
2916   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2917     MacroAssembler _masm(&cbuf);
2918     Register src_reg = as_Register($src$$reg);
2919     Register base = as_Register($mem$$base);
2920     int index = $mem$$index;
2921     int scale = $mem$$scale;
2922     int disp = $mem$$disp;
2923     if (index == -1) {
2924        if (disp != 0) {
2925         __ lea(rscratch2, Address(base, disp));
2926         __ stlxr(rscratch1, src_reg, rscratch2);
2927       } else {
2928         // TODO
2929         // should we ever get anything other than this case?
2930         __ stlxr(rscratch1, src_reg, base);
2931       }
2932     } else {
2933       Register index_reg = as_Register(index);
2934       if (disp == 0) {
2935         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
2936         __ stlxr(rscratch1, src_reg, rscratch2);
2937       } else {
2938         __ lea(rscratch2, Address(base, disp));
2939         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
2940         __ stlxr(rscratch1, src_reg, rscratch2);
2941       }
2942     }
2943     __ cmpw(rscratch1, zr);
2944   %}
2945 
  // Compare-and-swap encodings.  All of them require a plain base-register
  // address (no index, no displacement) -- enforced by the guarantee --
  // and delegate to MacroAssembler::cmpxchg with the operand size and
  // acquire/release/weak flags spelled out.

  // 64-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS, release-only ordering.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit CAS, acquire+release ordering.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS, acquire+release ordering.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS, acquire+release ordering.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS, acquire+release ordering.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3014 
3015   // auxiliary used for CompareAndSwapX to set result register
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    // res = 1 if the EQ flag is set (CAS succeeded), else 0.
    __ cset(res_reg, Assembler::EQ);
  %}
3021 
3022   // prefetch encodings
3023 
  // Prefetch-for-store (prfm PSTL1KEEP) at the address described by the
  // memory operand; composite base+disp+index addresses are partially
  // folded through rscratch1.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // Fold base + disp first, then add the scaled index.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3042 
  // mov (move-immediate) encodings
3044 
  // Move a 32-bit immediate into a register; zero is materialized from zr
  // instead of an immediate-move sequence.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a register; zero is materialized from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3066 
  // Move a pointer constant into a register, dispatching on its relocation
  // type: oops and metadata use relocatable moves; plain addresses below
  // the VM page size are moved directly, anything else goes through
  // adrp+add.  NULL and (address)1 are handled by dedicated encodings
  // (mov_p0 / mov_p1 below), hence ShouldNotReachHere.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // Page-aligned base via adrp, low bits added separately.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3091 
  // Pointer constant NULL: just copy zr.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Address of the polling page, with a poll_type relocation; the page is
  // expected to be 4K-aligned so adrp must leave no residual offset.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Base address of the GC card table byte map.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow oop NULL: just copy zr.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant; must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3149 
3150   // arithmetic encodings
3151 
  // Add/subtract immediate.  One encoding serves both operations: $primary
  // distinguishes them (add == 0, subtract == 1) by negating the constant,
  // and a negative result flips back to the opposite instruction so the
  // emitted immediate is always non-negative.

  // 32-bit add/sub immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit add/sub immediate.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3179 
3180   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3181     MacroAssembler _masm(&cbuf);
3182    Register dst_reg = as_Register($dst$$reg);
3183    Register src1_reg = as_Register($src1$$reg);
3184    Register src2_reg = as_Register($src2$$reg);
3185     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3186   %}
3187 
3188   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3189     MacroAssembler _masm(&cbuf);
3190    Register dst_reg = as_Register($dst$$reg);
3191    Register src1_reg = as_Register($src1$$reg);
3192    Register src2_reg = as_Register($src2$$reg);
3193     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3194   %}
3195 
3196   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3197     MacroAssembler _masm(&cbuf);
3198    Register dst_reg = as_Register($dst$$reg);
3199    Register src1_reg = as_Register($src1$$reg);
3200    Register src2_reg = as_Register($src2$$reg);
3201     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3202   %}
3203 
3204   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3205     MacroAssembler _masm(&cbuf);
3206    Register dst_reg = as_Register($dst$$reg);
3207    Register src1_reg = as_Register($src1$$reg);
3208    Register src2_reg = as_Register($src2$$reg);
3209     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3210   %}
3211 
3212   // compare instruction encodings
3213 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate, using the flag-
  // setting subsw/addsw with zr as destination so only flags are written.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      // Negative immediate: compare by adding its magnitude instead.
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.  val != -val is
  // false only for 0 (taken by the first branch) and Long.MIN_VALUE, which
  // cannot be negated and so is materialized in rscratch1 instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full 64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3295 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-comparison variant; identical emission, the unsigned
  // condition is encoded in the cmpOpU operand's cmpcode.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3313 
  // Slow-path subtype check: delegates to check_klass_subtype_slow_path,
  // which falls through on a hit and branches to `miss` otherwise.  When
  // $primary is set the hit path additionally zeroes the result register
  // before the miss label is bound.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3331 
  // Static Java call.  Runtime wrappers (no _method) get a plain runtime-
  // call relocation; real Java targets get an opt_virtual or static call
  // relocation plus a to-interpreter stub.  Either emission can fail when
  // the code cache is full, in which case the compile is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3358 
  // Emit a Java dynamic (virtual/interface) call through an inline cache.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      // NULL indicates emission failed (code cache full): abandon this compile.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3368 
  // Emitted after a Java call returns; optionally verifies stack integrity.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack.
      // Not implemented on AArch64 yet — traps if the flag is enabled.
      __ call_Unimplemented();
    }
  %}
3376 
  // Call from compiled Java code into a VM runtime routine.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a trampoline call suffices.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: call indirectly via blrt, with
      // argument/return info derived from the call's TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3407 
  // Jump to the rethrow stub to re-raise the pending exception.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3412 
  // Method return: branch to the address in the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3417 
  // Tail call: transfer control to the target without building a new frame.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3423 
  // Tail jump used for exception forwarding: hand the popped return
  // address to the callee in r3 before branching.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3433 
  // Fast-path monitor enter for C2-compiled synchronization.
  // object: oop to lock; box: on-stack BasicLock; tmp/tmp2: scratch.
  // On exit, flags hold EQ on success and NE on failure (slow path needed) —
  // see the trailing comments at label cont.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    // we can use AArch64's bit test and branch here but
    // markoopDesc does not define a bit index just the bit value
    // so assert in case the bit pos changes
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single compare-and-swap-with-acquire-release instruction.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Exclusive-monitor path: ldaxr/stlxr retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object; we have now locked it and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      __ cmp(rscratch1, disp_hdr);
    } else {
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3579 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  // Fast-path monitor exit for C2-compiled synchronization.
  // object: oop to unlock; box: on-stack BasicLock; tmp/tmp2: scratch.
  // On exit, flags hold EQ on success and NE on failure (slow path needed).
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE path: compare-and-swap-with-release, then test for success.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // Exclusive-monitor path: ldxr/stlxr retry loop.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    // We own the monitor with no recursions; if either EntryList or cxq
    // is non-zero take the slow path (cbnz below).
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr);
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3668 
3669 %}
3670 
3671 //----------FRAME--------------------------------------------------------------
3672 // Definition of frame structure and management information.
3673 //
3674 //  S T A C K   L A Y O U T    Allocators stack-slot number
3675 //                             |   (to get allocators register number
3676 //  G  Owned by    |        |  v    add OptoReg::stack0())
3677 //  r   CALLER     |        |
3678 //  o     |        +--------+      pad to even-align allocators stack-slot
3679 //  w     V        |  pad0  |        numbers; owned by CALLER
3680 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3681 //  h     ^        |   in   |  5
3682 //        |        |  args  |  4   Holes in incoming args owned by SELF
3683 //  |     |        |        |  3
3684 //  |     |        +--------+
3685 //  V     |        | old out|      Empty on Intel, window on Sparc
3686 //        |    old |preserve|      Must be even aligned.
3687 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3688 //        |        |   in   |  3   area for Intel ret address
3689 //     Owned by    |preserve|      Empty on Sparc.
3690 //       SELF      +--------+
3691 //        |        |  pad2  |  2   pad to align old SP
3692 //        |        +--------+  1
3693 //        |        | locks  |  0
3694 //        |        +--------+----> OptoReg::stack0(), even aligned
3695 //        |        |  pad1  | 11   pad to align new SP
3696 //        |        +--------+
3697 //        |        |        | 10
3698 //        |        | spills |  9   spills
3699 //        V        |        |  8   (pad0 slot for callee)
3700 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3701 //        ^        |  out   |  7
3702 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3703 //     Owned by    +--------+
3704 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3705 //        |    new |preserve|      Must be even-aligned.
3706 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3707 //        |        |        |
3708 //
3709 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3710 //         known from SELF's arguments and the Java calling convention.
3711 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3719 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3720 //         even aligned with pad0 as needed.
3721 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3722 //           (the latter is true on Intel but is it false on AArch64?)
3723 //         region 6-11 is even aligned; it may be padded out more so that
3724 //         the region from SP to FP meets the minimum stack alignment.
3725 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3726 //         alignment.  Region 11, pad1, may be dynamically extended so that
3727 //         SP meets the minimum alignment.
3728 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return-value register pair, indexed by ideal reg type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad marks single-slot values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3832 
3833 //----------ATTRIBUTES---------------------------------------------------------
3834 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// NOTE(review): these declare file-wide defaults; individual instructs
// appear to override them — confirm against the ADLC documentation.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3850 
3851 //----------OPERANDS-----------------------------------------------------------
3852 // Operand definitions must precede instruction definitions for correct parsing
3853 // in the ADLC because operands constitute user defined types which are used in
3854 // instruction definitions.
3855 
3856 //----------Simple Operands----------------------------------------------------
3857 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: no lower bound is checked)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3932 
// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4032 
// 64 bit constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of the form 2^k - 1 with the top two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of the form 2^k - 1 with the top two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4084 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4138 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// as immIOffset, but checked with scale shift 2 (4 byte accesses)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// as immIOffset, but checked with scale shift 3 (8 byte accesses)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// as immIOffset, but checked with scale shift 4 (16 byte accesses)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long variant of immIOffset
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long variant of immIOffset4
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long variant of immIOffset8
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long variant of immIOffset16
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4219 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  // NOTE: the int value is sign-extended by the cast through unsigned long
  // before validation (relevant to the TODO above).
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4241 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// NOTE(review): despite the "32 bit" label this matches a long (ConL)
// constant equal to the anchor's last_Java_pc byte offset.

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4328 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// matches only the address of the VM's safepoint polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// matches only the card-table byte_map_base constant, and only when the
// active barrier set is a CardTableBarrierSet (used by GC barrier stores)
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4410 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// n.b. the comparison is on the raw bit pattern, so -0.0d does NOT match
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an AArch64 packed (8-bit) fmov immediate
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// n.b. the comparison is on the raw bit pattern, so -0.0f does NOT match
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as an AArch64 packed (8-bit) fmov immediate
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4471 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4502 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its siblings this operand declares no op_cost(0);
// ADLC supplies a default — confirm this omission is intentional
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
4546 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The iRegP_Rn operands below pin allocation to one specific register;
// they are used where a fixed register is mandated (e.g. calling
// conventions and runtime stub interfaces).

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4663 
// Fixed-register long operands: allocation pinned to a single register.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only (the frame pointer register class)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4718 
// Fixed-register int operands: allocation pinned to a single register.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4763 
4764 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer (32 bit) Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4824 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register double operands (v0..v3) for stub/runtime interfaces
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4904 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4944 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
// pinned to the register class reserved for the current JavaThread pointer
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4986 
//----------Memory Operands----------------------------------------------------
// n.b. in the MEMORY_INTER descriptions below index(0xffffffff) is the
// ADLC convention meaning "no index register".

// [reg] -- plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (i32 index sign-extended to 64 bits) << scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (i64 index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (i32 index sign-extended to 64 bits)]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + i64 index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + 32 bit immediate offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// The indOffI4/8/16 variants restrict the offset (via immIOffset4/8/16)
// to values legal for 4-, 8- and 16-byte accesses respectively.

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + 64 bit immediate offset]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// The indOffL4/8/16 variants restrict the offset (via immLoffset4/8/16)
// to values legal for 4-, 8- and 16-byte accesses respectively.

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5172 
// Narrow-oop variants of the addressing modes above. All require
// Universe::narrow_oop_shift() == 0 so that the DecodeN can be folded
// into the address and the narrow register used directly as the base.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5277 
5278 
5279 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// base is the thread register and the displacement is fixed by
// immL_pc_off (offset of last_Java_pc within JavaThread)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5294 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory. base 0x1e is the SP encoding.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5369 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.
//
// n.b. the numeric values in the COND_INTER entries are the AArch64
// condition-code encodings for the paired mnemonics.

// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5501 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Addressing-mode groupings accepted by vector loads/stores of the
// given access size (4, 8 or 16 bytes respectively).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5518 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// (covers both the plain and the narrow-oop addressing operands)

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5546 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
// by aliasing the named A53 stages onto the generic stages S0-S3
//pipe_desc(ISS, EX1, EX2, WR);
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5556 
5557 // Integer ALU reg operation
5558 pipeline %{
5559 
attributes %{
  // All A64 instructions are fixed-length 32-bit encodings.
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5572 
5573 // We don't use an actual pipeline model so don't care about resources
5574 // or description. we do use pipeline classes to introduce fixed
5575 // latencies
5576 
5577 //----------RESOURCES----------------------------------------------------------
5578 // Resources are the functional units available to the machine
5579 
// INS0/INS1 are the two issue slots; INS01 models "either slot".
// ALU0/ALU1 are the two integer ALUs; ALU models "either ALU".
// MAC = multiply-accumulate, DIV = divider, LDST = load/store unit,
// NEON_FP = the SIMD/floating-point unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5587 
5588 //----------PIPELINE DESCRIPTION-----------------------------------------------
5589 // Pipeline Description specifies the stages in the machine's pipeline
5590 
5591 // Define the pipeline as a generic 6 stage pipeline
// Generic stages S0..S5; the A53 aliases ISS/EX1/EX2/WR defined above
// map onto S0..S3.
pipe_desc(S0, S1, S2, S3, S4, S5);
5593 
5594 //----------PIPELINE CLASSES---------------------------------------------------
5595 // Pipeline Classes describe the stages in which input and output are
5596 // referenced by the hardware pipeline.
5597 
// --- Floating-point pipeline classes -----------------------------------
// All FP work is booked on the NEON_FP resource.  Results are modelled
// as available at S5 (arithmetic and int<->fp conversions), S3
// (conditional selects and immediate moves) or S4 (constant loads).
// INS01 = may issue in either slot; INS0 = issue slot 0 only (divides).

// FP two-source op, single precision (dst written at S5)
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-source op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert, double source -> single dest
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert, single source -> double dest
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP -> int conversion (FP source, integer dest)
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP -> long conversion
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> FP conversion (integer source, FP dest)
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> single conversion
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int conversion
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long conversion
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double conversion
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double conversion
// NOTE(review): src is declared iRegIorL2I here but iRegL in fp_l2f —
// probably a copy/paste inconsistency; harmless since pipe_class
// parameter types are descriptive only.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision: restricted to issue slot 0
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision: restricted to issue slot 0
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags plus both
// sources at S1, result ready at S3
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load from the constant pool, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load from the constant pool, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
5799 
// --- Vector (NEON) pipeline classes ------------------------------------
// 64-bit (vecD) variants can generally issue in either slot (INS01);
// the 128-bit (vecX) variants — and vmuldiv_fp64 — are restricted to
// issue slot 0 (INS0).

// 64-bit vector integer multiply
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector integer multiply (slot 0 only)
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate.  dst is listed twice on purpose:
// it is read at S1 as the accumulator input and written at S5.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate (accumulator read at S1, slot 0 only)
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector integer two-source op (add/sub etc.), result at S4
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector integer two-source op (slot 0 only)
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op (and/or/xor), result at S3
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op (slot 0 only)
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by a register shift-count (count held in a vecX)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by register (slot 0 only)
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate (immediate needs no pipeline entry)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate (slot 0 only)
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector FP two-source op, result at S5
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP two-source op (slot 0 only)
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide (slot 0 only, unlike other 64-bit classes)
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide (slot 0 only)
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root (slot 0 only; no 64-bit variant defined)
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP one-source op (neg/abs etc.)
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP one-source op (slot 0 only)
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate a general register into all lanes of a 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes of a 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate an FP single register into a 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate an FP single register into a 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate an FP double register into a 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector immediate move (MOVI)
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector immediate move (slot 0 only)
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
6047 
// Vector memory pipeline classes.  The address (mem) is consumed at
// issue; load results are modelled as available at S5.

// 64-bit vector load
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store (source data needed at S2)
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6074 
// 128-bit vector store (source data needed at S2).
// Fix: the source operand is a quad (vecX) register, matching
// vload_reg_mem128 and every other 128-bit class; the original declared
// vecD, a copy/paste slip from the 64-bit variant.  pipe_class
// parameter types are descriptive only, so scheduling is unchanged.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6083 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU is booked at EX1 —
// inconsistent with the other EX2-result classes; confirm intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6181 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// Eg.  CSEL    X0, X1, XZR, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
// NOTE(review): operands are declared iRegI although this models the
// 64-bit divide; harmless since pipe_class parameter types are
// descriptive only, but iRegL would be clearer.
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6325 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// n.b. both parameters are inputs here: "dst" is the index register of
// the store address, "src" is the data being stored.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Branch pipeline operations -----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6422 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
6486 
6487 %}
6488 //----------INSTRUCTIONS-------------------------------------------------------
6489 //
6490 // match      -- States which machine-independent subtree may be replaced
6491 //               by this instruction.
6492 // ins_cost   -- The estimated cost of this instruction is used by instruction
6493 //               selection to identify a minimum cost tree of machine
6494 //               instructions that matches a tree of machine-independent
6495 //               instructions.
6496 // format     -- A string providing the disassembly for this instruction.
6497 //               The value of an instruction's operand may be inserted
6498 //               by referring to it with a '$' prefix.
6499 // opcode     -- Three instruction opcodes may be provided.  These are referred
6500 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6502 //               indicate the type of machine instruction, while secondary
6503 //               and tertiary are often used for prefix options or addressing
6504 //               modes.
6505 // ins_encode -- A list of encode classes with parameters. The encode class
6506 //               name must have been defined in an 'enc_class' specification
6507 //               in the encode section of the architecture description.
6508 
6509 // ============================================================================
6510 // Memory (Load/Store) Instructions
6511 
6512 // Load Instructions
6513 
// Load Byte (8 bit signed)
// The !needs_acquiring_load(n) predicate restricts these plain-load
// rules to loads without acquire semantics — NOTE(review): inferred
// from the predicate name; acquiring variants are presumably matched
// by separate rules elsewhere in this file.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// Matches the ConvI2L wrapped around the load, so the sign-extending
// 64-bit ldrsb does both jobs in one instruction.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
// ldrb zero-extends to the full 64-bit register, so the ConvI2L is free.
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6569 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
// Fuses the ConvI2L into the sign-extending 64-bit ldrsh.
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
// ldrh zero-extends, so the ConvI2L costs nothing extra.
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6625 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// The sign-extending ldrsw subsumes the ConvI2L.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches (long)load & 0xFFFFFFFF: ldrw already zero-extends into the
// 64-bit register, so the AndL mask is matched away for free.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6667 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly annotation fixed: this is a 64-bit long load, not an
  // int load (copy/paste from the 32-bit rules).  Annotation only —
  // the emitted code is unchanged.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6681 
// Load Range (array length)
// n.b. no acquiring predicate: a LoadRange never carries acquire
// semantics, so there is no volatile variant to exclude.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer (64-bit uncompressed oop)
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop; decode handled elsewhere)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6736 
// Load Narrow Klass Pointer (32-bit compressed class pointer)
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// n.b. FP loads are scheduled with the generic pipe_class_memory
// rather than iload_reg_mem, unlike the integer loads above.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6778 
6779 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4x INSN_COST: a general pointer constant may expand to a
// multi-instruction materialization, unlike the NULL/one special cases
// below.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant (single instruction)
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
6835 
6836 // Load Pointer Constant One
6837 
// Load Pointer Constant One
//
// Materializes the pointer constant 1 (immP_1), used as a marker
// value rather than a real oop.  The format comment previously said
// "# NULL ptr" — a copy-paste from loadConP0 — which was misleading
// in disassembly/debug output.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6849 
6850 // Load Poll Page Constant
6851 
6852 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6853 %{
6854   match(Set dst con);
6855 
6856   ins_cost(INSN_COST);
6857   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6858 
6859   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6860 
6861   ins_pipe(ialu_imm);
6862 %}
6863 
6864 // Load Byte Map Base Constant
6865 
6866 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6867 %{
6868   match(Set dst con);
6869 
6870   ins_cost(INSN_COST);
6871   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6872 
6873   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6874 
6875   ins_pipe(ialu_imm);
6876 %}
6877 
6878 // Load Narrow Pointer Constant
6879 
6880 instruct loadConN(iRegNNoSp dst, immN con)
6881 %{
6882   match(Set dst con);
6883 
6884   ins_cost(INSN_COST * 4);
6885   format %{ "mov  $dst, $con\t# compressed ptr" %}
6886 
6887   ins_encode(aarch64_enc_mov_n(dst, con));
6888 
6889   ins_pipe(ialu_imm);
6890 %}
6891 
6892 // Load Narrow Null Pointer Constant
6893 
6894 instruct loadConN0(iRegNNoSp dst, immN0 con)
6895 %{
6896   match(Set dst con);
6897 
6898   ins_cost(INSN_COST);
6899   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6900 
6901   ins_encode(aarch64_enc_mov_n0(dst, con));
6902 
6903   ins_pipe(ialu_imm);
6904 %}
6905 
6906 // Load Narrow Klass Constant
6907 
6908 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6909 %{
6910   match(Set dst con);
6911 
6912   ins_cost(INSN_COST);
6913   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6914 
6915   ins_encode(aarch64_enc_mov_nk(dst, con));
6916 
6917   ins_pipe(ialu_imm);
6918 %}
6919 
6920 // Load Packed Float Constant
6921 
6922 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6923   match(Set dst con);
6924   ins_cost(INSN_COST * 4);
6925   format %{ "fmovs  $dst, $con"%}
6926   ins_encode %{
6927     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6928   %}
6929 
6930   ins_pipe(fp_imm_s);
6931 %}
6932 
6933 // Load Float Constant
6934 
6935 instruct loadConF(vRegF dst, immF con) %{
6936   match(Set dst con);
6937 
6938   ins_cost(INSN_COST * 4);
6939 
6940   format %{
6941     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6942   %}
6943 
6944   ins_encode %{
6945     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6946   %}
6947 
6948   ins_pipe(fp_load_constant_s);
6949 %}
6950 
6951 // Load Packed Double Constant
6952 
6953 instruct loadConD_packed(vRegD dst, immDPacked con) %{
6954   match(Set dst con);
6955   ins_cost(INSN_COST);
6956   format %{ "fmovd  $dst, $con"%}
6957   ins_encode %{
6958     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
6959   %}
6960 
6961   ins_pipe(fp_imm_d);
6962 %}
6963 
6964 // Load Double Constant
6965 
// Load Double Constant
//
// Loads a double constant that cannot be encoded as an fmovd
// immediate (see loadConD_packed above) from the constant table.
// The format string previously said "float=$con" — copy-paste from
// loadConF — which mislabelled the constant in debug output.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6980 
6981 // Store Instructions
6982 
6983 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate
//
// Writes a zero byte to the card table.  The StoreStore barrier that
// would normally precede the card mark is elided: the predicate proves
// an adjacent barrier already orders the preceding oop store (see
// unnecessary_storestore).  The _ordered variant below is used when
// the barrier cannot be elided.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6997 
6998 // Store CMS card-mark Immediate with intervening StoreStore
6999 // needed when using CMS with no conditional card marking
// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
//
// Fallback for StoreCM when the StoreStore barrier cannot be elided:
// emits "dmb ishst" before the zero-byte card-table store so the
// preceding oop store is visible before the card mark.
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
7013 
7014 // Store Byte
7015 instruct storeB(iRegIorL2I src, memory mem)
7016 %{
7017   match(Set mem (StoreB mem src));
7018   predicate(!needs_releasing_store(n));
7019 
7020   ins_cost(INSN_COST);
7021   format %{ "strb  $src, $mem\t# byte" %}
7022 
7023   ins_encode(aarch64_enc_strb(src, mem));
7024 
7025   ins_pipe(istore_reg_mem);
7026 %}
7027 
7028 
// Store Immediate Byte Zero
//
// Stores a zero byte using the zero register.  The format string
// previously read "strb rscractch2" — a typo for rscratch2 and wrong
// in any case: the aarch64_enc_strb0 encoding emits "strb zr" (cf.
// storeimmCM0 above, which uses the same encoding).
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7041 
7042 // Store Char/Short
7043 instruct storeC(iRegIorL2I src, memory mem)
7044 %{
7045   match(Set mem (StoreC mem src));
7046   predicate(!needs_releasing_store(n));
7047 
7048   ins_cost(INSN_COST);
7049   format %{ "strh  $src, $mem\t# short" %}
7050 
7051   ins_encode(aarch64_enc_strh(src, mem));
7052 
7053   ins_pipe(istore_reg_mem);
7054 %}
7055 
7056 instruct storeimmC0(immI0 zero, memory mem)
7057 %{
7058   match(Set mem (StoreC mem zero));
7059   predicate(!needs_releasing_store(n));
7060 
7061   ins_cost(INSN_COST);
7062   format %{ "strh  zr, $mem\t# short" %}
7063 
7064   ins_encode(aarch64_enc_strh0(mem));
7065 
7066   ins_pipe(istore_mem);
7067 %}
7068 
7069 // Store Integer
7070 
7071 instruct storeI(iRegIorL2I src, memory mem)
7072 %{
7073   match(Set mem(StoreI mem src));
7074   predicate(!needs_releasing_store(n));
7075 
7076   ins_cost(INSN_COST);
7077   format %{ "strw  $src, $mem\t# int" %}
7078 
7079   ins_encode(aarch64_enc_strw(src, mem));
7080 
7081   ins_pipe(istore_reg_mem);
7082 %}
7083 
7084 instruct storeimmI0(immI0 zero, memory mem)
7085 %{
7086   match(Set mem(StoreI mem zero));
7087   predicate(!needs_releasing_store(n));
7088 
7089   ins_cost(INSN_COST);
7090   format %{ "strw  zr, $mem\t# int" %}
7091 
7092   ins_encode(aarch64_enc_strw0(mem));
7093 
7094   ins_pipe(istore_mem);
7095 %}
7096 
7097 // Store Long (64 bit signed)
// Store Long (64 bit signed)
//
// Plain (non-releasing) 64-bit store.  The format comment previously
// said "# int" — copy-paste from storeI — which mislabelled the store
// width in debug output.
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7110 
7111 // Store Long (64 bit signed)
// Store Immediate Long Zero (64 bit)
//
// Stores 64-bit zero via the zero register.  Format comment fixed
// from "# int" to "# long" to reflect the actual store width.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7124 
7125 // Store Pointer
7126 instruct storeP(iRegP src, memory mem)
7127 %{
7128   match(Set mem (StoreP mem src));
7129   predicate(!needs_releasing_store(n));
7130 
7131   ins_cost(INSN_COST);
7132   format %{ "str  $src, $mem\t# ptr" %}
7133 
7134   ins_encode(aarch64_enc_str(src, mem));
7135 
7136   ins_pipe(istore_reg_mem);
7137 %}
7138 
7139 // Store Pointer
7140 instruct storeimmP0(immP0 zero, memory mem)
7141 %{
7142   match(Set mem (StoreP mem zero));
7143   predicate(!needs_releasing_store(n));
7144 
7145   ins_cost(INSN_COST);
7146   format %{ "str zr, $mem\t# ptr" %}
7147 
7148   ins_encode(aarch64_enc_str0(mem));
7149 
7150   ins_pipe(istore_mem);
7151 %}
7152 
7153 // Store Compressed Pointer
7154 instruct storeN(iRegN src, memory mem)
7155 %{
7156   match(Set mem (StoreN mem src));
7157   predicate(!needs_releasing_store(n));
7158 
7159   ins_cost(INSN_COST);
7160   format %{ "strw  $src, $mem\t# compressed ptr" %}
7161 
7162   ins_encode(aarch64_enc_strw(src, mem));
7163 
7164   ins_pipe(istore_reg_mem);
7165 %}
7166 
// Store Immediate Compressed-Pointer Zero
//
// When both the narrow-oop and narrow-klass bases are NULL, the
// heapbase register holds zero, so a compressed null can be stored by
// writing rheapbase directly — saving the materialization of a zero.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  // rheapbase only contains 0 when neither base register is in use.
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7181 
7182 // Store Float
7183 instruct storeF(vRegF src, memory mem)
7184 %{
7185   match(Set mem (StoreF mem src));
7186   predicate(!needs_releasing_store(n));
7187 
7188   ins_cost(INSN_COST);
7189   format %{ "strs  $src, $mem\t# float" %}
7190 
7191   ins_encode( aarch64_enc_strs(src, mem) );
7192 
7193   ins_pipe(pipe_class_memory);
7194 %}
7195 
7196 // TODO
7197 // implement storeImmF0 and storeFImmPacked
7198 
7199 // Store Double
7200 instruct storeD(vRegD src, memory mem)
7201 %{
7202   match(Set mem (StoreD mem src));
7203   predicate(!needs_releasing_store(n));
7204 
7205   ins_cost(INSN_COST);
7206   format %{ "strd  $src, $mem\t# double" %}
7207 
7208   ins_encode( aarch64_enc_strd(src, mem) );
7209 
7210   ins_pipe(pipe_class_memory);
7211 %}
7212 
7213 // Store Compressed Klass Pointer
7214 instruct storeNKlass(iRegN src, memory mem)
7215 %{
7216   predicate(!needs_releasing_store(n));
7217   match(Set mem (StoreNKlass mem src));
7218 
7219   ins_cost(INSN_COST);
7220   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7221 
7222   ins_encode(aarch64_enc_strw(src, mem));
7223 
7224   ins_pipe(istore_reg_mem);
7225 %}
7226 
7227 // TODO
7228 // implement storeImmD0 and storeDImmPacked
7229 
7230 // prefetch instructions
7231 // Must be safe to execute with invalid address (cannot fault).
7232 
7233 instruct prefetchalloc( memory mem ) %{
7234   match(PrefetchAllocation mem);
7235 
7236   ins_cost(INSN_COST);
7237   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7238 
7239   ins_encode( aarch64_enc_prefetchw(mem) );
7240 
7241   ins_pipe(iload_prefetch);
7242 %}
7243 
7244 //  ---------------- volatile loads and stores ----------------
7245 
7246 // Load Byte (8 bit signed)
7247 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7248 %{
7249   match(Set dst (LoadB mem));
7250 
7251   ins_cost(VOLATILE_REF_COST);
7252   format %{ "ldarsb  $dst, $mem\t# byte" %}
7253 
7254   ins_encode(aarch64_enc_ldarsb(dst, mem));
7255 
7256   ins_pipe(pipe_serial);
7257 %}
7258 
7259 // Load Byte (8 bit signed) into long
7260 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7261 %{
7262   match(Set dst (ConvI2L (LoadB mem)));
7263 
7264   ins_cost(VOLATILE_REF_COST);
7265   format %{ "ldarsb  $dst, $mem\t# byte" %}
7266 
7267   ins_encode(aarch64_enc_ldarsb(dst, mem));
7268 
7269   ins_pipe(pipe_serial);
7270 %}
7271 
7272 // Load Byte (8 bit unsigned)
7273 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7274 %{
7275   match(Set dst (LoadUB mem));
7276 
7277   ins_cost(VOLATILE_REF_COST);
7278   format %{ "ldarb  $dst, $mem\t# byte" %}
7279 
7280   ins_encode(aarch64_enc_ldarb(dst, mem));
7281 
7282   ins_pipe(pipe_serial);
7283 %}
7284 
7285 // Load Byte (8 bit unsigned) into long
7286 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7287 %{
7288   match(Set dst (ConvI2L (LoadUB mem)));
7289 
7290   ins_cost(VOLATILE_REF_COST);
7291   format %{ "ldarb  $dst, $mem\t# byte" %}
7292 
7293   ins_encode(aarch64_enc_ldarb(dst, mem));
7294 
7295   ins_pipe(pipe_serial);
7296 %}
7297 
7298 // Load Short (16 bit signed)
7299 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7300 %{
7301   match(Set dst (LoadS mem));
7302 
7303   ins_cost(VOLATILE_REF_COST);
7304   format %{ "ldarshw  $dst, $mem\t# short" %}
7305 
7306   ins_encode(aarch64_enc_ldarshw(dst, mem));
7307 
7308   ins_pipe(pipe_serial);
7309 %}
7310 
// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
7322 
7323 // Load Short/Char (16 bit unsigned) into long
7324 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7325 %{
7326   match(Set dst (ConvI2L (LoadUS mem)));
7327 
7328   ins_cost(VOLATILE_REF_COST);
7329   format %{ "ldarh  $dst, $mem\t# short" %}
7330 
7331   ins_encode(aarch64_enc_ldarh(dst, mem));
7332 
7333   ins_pipe(pipe_serial);
7334 %}
7335 
7336 // Load Short/Char (16 bit signed) into long
// Load Short/Char (16 bit signed) into long
//
// Acquiring sign-extending halfword load.  The format string
// previously said "ldarh" (the unsigned form) although the encoding
// is aarch64_enc_ldarsh; fixed so the debug output matches the
// emitted instruction.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7348 
7349 // Load Integer (32 bit signed)
7350 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7351 %{
7352   match(Set dst (LoadI mem));
7353 
7354   ins_cost(VOLATILE_REF_COST);
7355   format %{ "ldarw  $dst, $mem\t# int" %}
7356 
7357   ins_encode(aarch64_enc_ldarw(dst, mem));
7358 
7359   ins_pipe(pipe_serial);
7360 %}
7361 
7362 // Load Integer (32 bit unsigned) into long
7363 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7364 %{
7365   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7366 
7367   ins_cost(VOLATILE_REF_COST);
7368   format %{ "ldarw  $dst, $mem\t# int" %}
7369 
7370   ins_encode(aarch64_enc_ldarw(dst, mem));
7371 
7372   ins_pipe(pipe_serial);
7373 %}
7374 
7375 // Load Long (64 bit signed)
// Load Long (64 bit signed)
//
// Acquiring 64-bit load.  Format comment fixed from "# int" to
// "# long" to reflect the actual load width.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7387 
7388 // Load Pointer
7389 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7390 %{
7391   match(Set dst (LoadP mem));
7392 
7393   ins_cost(VOLATILE_REF_COST);
7394   format %{ "ldar  $dst, $mem\t# ptr" %}
7395 
7396   ins_encode(aarch64_enc_ldar(dst, mem));
7397 
7398   ins_pipe(pipe_serial);
7399 %}
7400 
7401 // Load Compressed Pointer
7402 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7403 %{
7404   match(Set dst (LoadN mem));
7405 
7406   ins_cost(VOLATILE_REF_COST);
7407   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7408 
7409   ins_encode(aarch64_enc_ldarw(dst, mem));
7410 
7411   ins_pipe(pipe_serial);
7412 %}
7413 
7414 // Load Float
7415 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7416 %{
7417   match(Set dst (LoadF mem));
7418 
7419   ins_cost(VOLATILE_REF_COST);
7420   format %{ "ldars  $dst, $mem\t# float" %}
7421 
7422   ins_encode( aarch64_enc_fldars(dst, mem) );
7423 
7424   ins_pipe(pipe_serial);
7425 %}
7426 
7427 // Load Double
7428 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7429 %{
7430   match(Set dst (LoadD mem));
7431 
7432   ins_cost(VOLATILE_REF_COST);
7433   format %{ "ldard  $dst, $mem\t# double" %}
7434 
7435   ins_encode( aarch64_enc_fldard(dst, mem) );
7436 
7437   ins_pipe(pipe_serial);
7438 %}
7439 
7440 // Store Byte
7441 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7442 %{
7443   match(Set mem (StoreB mem src));
7444 
7445   ins_cost(VOLATILE_REF_COST);
7446   format %{ "stlrb  $src, $mem\t# byte" %}
7447 
7448   ins_encode(aarch64_enc_stlrb(src, mem));
7449 
7450   ins_pipe(pipe_class_memory);
7451 %}
7452 
7453 // Store Char/Short
7454 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7455 %{
7456   match(Set mem (StoreC mem src));
7457 
7458   ins_cost(VOLATILE_REF_COST);
7459   format %{ "stlrh  $src, $mem\t# short" %}
7460 
7461   ins_encode(aarch64_enc_stlrh(src, mem));
7462 
7463   ins_pipe(pipe_class_memory);
7464 %}
7465 
7466 // Store Integer
7467 
7468 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7469 %{
7470   match(Set mem(StoreI mem src));
7471 
7472   ins_cost(VOLATILE_REF_COST);
7473   format %{ "stlrw  $src, $mem\t# int" %}
7474 
7475   ins_encode(aarch64_enc_stlrw(src, mem));
7476 
7477   ins_pipe(pipe_class_memory);
7478 %}
7479 
7480 // Store Long (64 bit signed)
// Store Long (64 bit signed)
//
// Releasing 64-bit store.  Format comment fixed from "# int" to
// "# long" to reflect the actual store width.
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7492 
7493 // Store Pointer
7494 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7495 %{
7496   match(Set mem (StoreP mem src));
7497 
7498   ins_cost(VOLATILE_REF_COST);
7499   format %{ "stlr  $src, $mem\t# ptr" %}
7500 
7501   ins_encode(aarch64_enc_stlr(src, mem));
7502 
7503   ins_pipe(pipe_class_memory);
7504 %}
7505 
7506 // Store Compressed Pointer
7507 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7508 %{
7509   match(Set mem (StoreN mem src));
7510 
7511   ins_cost(VOLATILE_REF_COST);
7512   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7513 
7514   ins_encode(aarch64_enc_stlrw(src, mem));
7515 
7516   ins_pipe(pipe_class_memory);
7517 %}
7518 
7519 // Store Float
7520 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7521 %{
7522   match(Set mem (StoreF mem src));
7523 
7524   ins_cost(VOLATILE_REF_COST);
7525   format %{ "stlrs  $src, $mem\t# float" %}
7526 
7527   ins_encode( aarch64_enc_fstlrs(src, mem) );
7528 
7529   ins_pipe(pipe_class_memory);
7530 %}
7531 
7532 // TODO
7533 // implement storeImmF0 and storeFImmPacked
7534 
7535 // Store Double
7536 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7537 %{
7538   match(Set mem (StoreD mem src));
7539 
7540   ins_cost(VOLATILE_REF_COST);
7541   format %{ "stlrd  $src, $mem\t# double" %}
7542 
7543   ins_encode( aarch64_enc_fstlrd(src, mem) );
7544 
7545   ins_pipe(pipe_class_memory);
7546 %}
7547 
7548 //  ---------------- end of volatile loads and stores ----------------
7549 
7550 // ============================================================================
7551 // BSWAP Instructions
7552 
7553 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7554   match(Set dst (ReverseBytesI src));
7555 
7556   ins_cost(INSN_COST);
7557   format %{ "revw  $dst, $src" %}
7558 
7559   ins_encode %{
7560     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7561   %}
7562 
7563   ins_pipe(ialu_reg);
7564 %}
7565 
7566 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7567   match(Set dst (ReverseBytesL src));
7568 
7569   ins_cost(INSN_COST);
7570   format %{ "rev  $dst, $src" %}
7571 
7572   ins_encode %{
7573     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7574   %}
7575 
7576   ins_pipe(ialu_reg);
7577 %}
7578 
7579 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7580   match(Set dst (ReverseBytesUS src));
7581 
7582   ins_cost(INSN_COST);
7583   format %{ "rev16w  $dst, $src" %}
7584 
7585   ins_encode %{
7586     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7587   %}
7588 
7589   ins_pipe(ialu_reg);
7590 %}
7591 
7592 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7593   match(Set dst (ReverseBytesS src));
7594 
7595   ins_cost(INSN_COST);
7596   format %{ "rev16w  $dst, $src\n\t"
7597             "sbfmw $dst, $dst, #0, #15" %}
7598 
7599   ins_encode %{
7600     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7601     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7602   %}
7603 
7604   ins_pipe(ialu_reg);
7605 %}
7606 
7607 // ============================================================================
7608 // Zero Count Instructions
7609 
7610 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7611   match(Set dst (CountLeadingZerosI src));
7612 
7613   ins_cost(INSN_COST);
7614   format %{ "clzw  $dst, $src" %}
7615   ins_encode %{
7616     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7617   %}
7618 
7619   ins_pipe(ialu_reg);
7620 %}
7621 
7622 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7623   match(Set dst (CountLeadingZerosL src));
7624 
7625   ins_cost(INSN_COST);
7626   format %{ "clz   $dst, $src" %}
7627   ins_encode %{
7628     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7629   %}
7630 
7631   ins_pipe(ialu_reg);
7632 %}
7633 
7634 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7635   match(Set dst (CountTrailingZerosI src));
7636 
7637   ins_cost(INSN_COST * 2);
7638   format %{ "rbitw  $dst, $src\n\t"
7639             "clzw   $dst, $dst" %}
7640   ins_encode %{
7641     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7642     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7643   %}
7644 
7645   ins_pipe(ialu_reg);
7646 %}
7647 
7648 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7649   match(Set dst (CountTrailingZerosL src));
7650 
7651   ins_cost(INSN_COST * 2);
7652   format %{ "rbit   $dst, $src\n\t"
7653             "clz    $dst, $dst" %}
7654   ins_encode %{
7655     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7656     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7657   %}
7658 
7659   ins_pipe(ialu_reg);
7660 %}
7661 
7662 //---------- Population Count Instructions -------------------------------------
7663 //
7664 
// Population count of an int using the NEON CNT instruction: move the
// (zero-extended) value into a vector register, count bits per byte,
// then sum the eight byte counts with ADDV and move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw w,w zero-extends in place so the high 32 bits of the
    // 64-bit lane do not contribute to the count.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7686 
7687 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
7688   predicate(UsePopCountInstruction);
7689   match(Set dst (PopCountI (LoadI mem)));
7690   effect(TEMP tmp);
7691   ins_cost(INSN_COST * 13);
7692 
7693   format %{ "ldrs   $tmp, $mem\n\t"
7694             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7695             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7696             "mov    $dst, $tmp\t# vector (1D)" %}
7697   ins_encode %{
7698     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7699     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
7700                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7701     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7702     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7703     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7704   %}
7705 
7706   ins_pipe(pipe_class_default);
7707 %}
7708 
7709 // Note: Long.bitCount(long) returns an int.
7710 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7711   predicate(UsePopCountInstruction);
7712   match(Set dst (PopCountL src));
7713   effect(TEMP tmp);
7714   ins_cost(INSN_COST * 13);
7715 
7716   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
7717             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7718             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7719             "mov    $dst, $tmp\t# vector (1D)" %}
7720   ins_encode %{
7721     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
7722     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7723     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7724     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7725   %}
7726 
7727   ins_pipe(pipe_class_default);
7728 %}
7729 
7730 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
7731   predicate(UsePopCountInstruction);
7732   match(Set dst (PopCountL (LoadL mem)));
7733   effect(TEMP tmp);
7734   ins_cost(INSN_COST * 13);
7735 
7736   format %{ "ldrd   $tmp, $mem\n\t"
7737             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7738             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7739             "mov    $dst, $tmp\t# vector (1D)" %}
7740   ins_encode %{
7741     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7742     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
7743                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7744     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7745     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7746     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7747   %}
7748 
7749   ins_pipe(pipe_class_default);
7750 %}
7751 
7752 // ============================================================================
7753 // MemBar Instruction
7754 
7755 instruct load_fence() %{
7756   match(LoadFence);
7757   ins_cost(VOLATILE_REF_COST);
7758 
7759   format %{ "load_fence" %}
7760 
7761   ins_encode %{
7762     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7763   %}
7764   ins_pipe(pipe_serial);
7765 %}
7766 
// Elided acquire barrier: when the preceding volatile load will be
// (or was) emitted as ldar, the explicit MemBarAcquire is redundant
// and only a block comment is emitted.  Cost 0 so the matcher prefers
// this rule whenever the predicate holds.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
7780 
7781 instruct membar_acquire() %{
7782   match(MemBarAcquire);
7783   ins_cost(VOLATILE_REF_COST);
7784 
7785   format %{ "membar_acquire\n\t"
7786             "dmb ish" %}
7787 
7788   ins_encode %{
7789     __ block_comment("membar_acquire");
7790     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7791   %}
7792 
7793   ins_pipe(pipe_serial);
7794 %}
7795 
7796 
7797 instruct membar_acquire_lock() %{
7798   match(MemBarAcquireLock);
7799   ins_cost(VOLATILE_REF_COST);
7800 
7801   format %{ "membar_acquire_lock (elided)" %}
7802 
7803   ins_encode %{
7804     __ block_comment("membar_acquire_lock (elided)");
7805   %}
7806 
7807   ins_pipe(pipe_serial);
7808 %}
7809 
7810 instruct store_fence() %{
7811   match(StoreFence);
7812   ins_cost(VOLATILE_REF_COST);
7813 
7814   format %{ "store_fence" %}
7815 
7816   ins_encode %{
7817     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7818   %}
7819   ins_pipe(pipe_serial);
7820 %}
7821 
7822 instruct unnecessary_membar_release() %{
7823   predicate(unnecessary_release(n));
7824   match(MemBarRelease);
7825   ins_cost(0);
7826 
7827   format %{ "membar_release (elided)" %}
7828 
7829   ins_encode %{
7830     __ block_comment("membar_release (elided)");
7831   %}
7832   ins_pipe(pipe_serial);
7833 %}
7834 
7835 instruct membar_release() %{
7836   match(MemBarRelease);
7837   ins_cost(VOLATILE_REF_COST);
7838 
7839   format %{ "membar_release\n\t"
7840             "dmb ish" %}
7841 
7842   ins_encode %{
7843     __ block_comment("membar_release");
7844     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7845   %}
7846   ins_pipe(pipe_serial);
7847 %}
7848 
7849 instruct membar_storestore() %{
7850   match(MemBarStoreStore);
7851   ins_cost(VOLATILE_REF_COST);
7852 
7853   format %{ "MEMBAR-store-store" %}
7854 
7855   ins_encode %{
7856     __ membar(Assembler::StoreStore);
7857   %}
7858   ins_pipe(pipe_serial);
7859 %}
7860 
7861 instruct membar_release_lock() %{
7862   match(MemBarReleaseLock);
7863   ins_cost(VOLATILE_REF_COST);
7864 
7865   format %{ "membar_release_lock (elided)" %}
7866 
7867   ins_encode %{
7868     __ block_comment("membar_release_lock (elided)");
7869   %}
7870 
7871   ins_pipe(pipe_serial);
7872 %}
7873 
7874 instruct unnecessary_membar_volatile() %{
7875   predicate(unnecessary_volatile(n));
7876   match(MemBarVolatile);
7877   ins_cost(0);
7878 
7879   format %{ "membar_volatile (elided)" %}
7880 
7881   ins_encode %{
7882     __ block_comment("membar_volatile (elided)");
7883   %}
7884 
7885   ins_pipe(pipe_serial);
7886 %}
7887 
7888 instruct membar_volatile() %{
7889   match(MemBarVolatile);
7890   ins_cost(VOLATILE_REF_COST*100);
7891 
7892   format %{ "membar_volatile\n\t"
7893              "dmb ish"%}
7894 
7895   ins_encode %{
7896     __ block_comment("membar_volatile");
7897     __ membar(Assembler::StoreLoad);
7898   %}
7899 
7900   ins_pipe(pipe_serial);
7901 %}
7902 
7903 // ============================================================================
7904 // Cast/Convert Instructions
7905 
// Reinterpret a long as a pointer.  A register-to-register move, and
// a no-op (no instruction emitted) when the allocator assigns src and
// dst to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    // Skip the mov entirely when the registers coincide.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
7920 
7921 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7922   match(Set dst (CastP2X src));
7923 
7924   ins_cost(INSN_COST);
7925   format %{ "mov $dst, $src\t# ptr -> long" %}
7926 
7927   ins_encode %{
7928     if ($dst$$reg != $src$$reg) {
7929       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7930     }
7931   %}
7932 
7933   ins_pipe(ialu_reg);
7934 %}
7935 
7936 // Convert oop into int for vectors alignment masking
7937 instruct convP2I(iRegINoSp dst, iRegP src) %{
7938   match(Set dst (ConvL2I (CastP2X src)));
7939 
7940   ins_cost(INSN_COST);
7941   format %{ "movw $dst, $src\t# ptr -> int" %}
7942   ins_encode %{
7943     __ movw($dst$$Register, $src$$Register);
7944   %}
7945 
7946   ins_pipe(ialu_reg);
7947 %}
7948 
7949 // Convert compressed oop into int for vectors alignment masking
7950 // in case of 32bit oops (heap < 4Gb).
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
//
// With a zero narrow-oop shift the compressed form already equals the
// low 32 bits of the decoded pointer, so a movw suffices.  The format
// string previously read "mov dst, $src" — missing the '$' on dst and
// naming the wrong mnemonic relative to the emitted movw.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7964 
7965 
// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // General form: the oop may be null, so the (possibly branching)
  // macro-assembler sequence is used and the flags are clobbered.
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
7980 
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // Known-non-null oop: the simpler encode sequence applies.
  // NOTE(review): cr is declared but has no KILL effect here, unlike
  // encodeHeapOop above — confirm the not-null sequence really leaves
  // the flags untouched.
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;
    __ encode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7991 
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  // General decode of a narrow oop that may be null; constant oops are
  // excluded because they are materialized directly.
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    __ decode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8005 
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  // Decode of a narrow oop known to be non-null (or a constant), which
  // permits the cheaper non-null macro-assembler sequence.
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ decode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8019 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  // Compress a klass pointer; klass pointers are never null so only
  // the not-null form exists.
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8038 
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (d == s) {
      // In-place decode: the macro assembler provides a dedicated
      // single-register form for this case.
      __ decode_klass_not_null(d);
    } else {
      __ decode_klass_not_null(d, s);
    }
  %}

  ins_pipe(ialu_reg);
%}
8057 
// CheckCastPP only narrows the Ideal type of the pointer; the value and
// register are unchanged, so no instruction is emitted (size(0)).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8067 
// CastPP is likewise a type-only node: no machine code is generated.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8077 
// CastII pins/narrows an int value's type; it emits no code.
// ins_cost moved ahead of ins_encode for consistency with the sibling
// castPP/checkCastPP rules (attribute order is not semantically
// significant to ADLC).
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  ins_cost(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8088 
8089 // ============================================================================
8090 // Atomic operation instructions
8091 //
8092 // Intel and SPARC both implement Ideal Node LoadPLocked and
8093 // Store{PIL}Conditional instructions using a normal load for the
8094 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8095 //
8096 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8097 // pair to lock object allocations from Eden space when not using
8098 // TLABs.
8099 //
8100 // There does not appear to be a Load{IL}Locked Ideal Node and the
8101 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8102 // and to use StoreIConditional only for 32-bit and StoreLConditional
8103 // only for 64-bit.
8104 //
8105 // We implement LoadPLocked and StorePLocked instructions using,
8106 // respectively the AArch64 hw load-exclusive and store-conditional
8107 // instructions. Whereas we must implement each of
8108 // Store{IL}Conditional using a CAS which employs a pair of
8109 // instructions comprising a load-exclusive followed by a
8110 // store-conditional.
8111 
8112 
8113 // Locked-load (linked load) of the current heap-top
8114 // used when updating the eden heap top
8115 // implemented using ldaxr on AArch64
8116 
// Load-exclusive of a pointer with acquire semantics (ldaxr via the
// named encode class); paired with storePConditional below for
// non-TLAB eden allocation.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8129 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// Completes the loadPLocked pair: stlxr succeeds only if no other CPU
// wrote the location since the matching ldaxr.  Note oldval is unused
// by the encoding — success is determined by the exclusive monitor,
// not a value comparison; the flag state comes from comparing the
// stlxr status register against zero (per the format text).
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8154 
8155 
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.

// Produces only the flag result (EQ on success); the witnessed value
// is not exposed to the graph.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8175 
// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.

// 32-bit twin of storeLConditional: acquiring cmpxchgw, flag-only result.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8194 
8195 // standard CompareAndSwapX when we are using barriers
8196 // these have higher priority than the rules selected by a predicate
8197 
8198 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8199 // can't match them
8200 
// Strong byte CAS (plain, non-acquiring load-exclusive).  $res is set
// to 1 on success, 0 on failure via the trailing cset; flags killed.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8218 
// Strong short (halfword) CAS; result is the 0/1 success flag in $res.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8236 
// Strong int CAS; result is the 0/1 success flag in $res.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8254 
// Strong long (64-bit) CAS; result is the 0/1 success flag in $res.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8272 
// Strong pointer (64-bit oop) CAS; result is the 0/1 success flag.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8290 
// Strong narrow-oop (32-bit compressed pointer) CAS; 0/1 flag result.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8308 
8309 // alternative CompareAndSwapX when we are eliding barriers
8310 
// Acquiring variant of compareAndSwapB, selected (via the predicate)
// when the CAS stands in for a volatile access; the ldaxr-based
// cmpxchg makes a separate barrier unnecessary, hence the lower cost.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8329 
// Acquiring variant of compareAndSwapS (see compareAndSwapBAcq).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8348 
// Acquiring variant of compareAndSwapI (see compareAndSwapBAcq).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8367 
// Acquiring variant of compareAndSwapL (see compareAndSwapBAcq).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8386 
// Acquiring variant of compareAndSwapP (see compareAndSwapBAcq).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8405 
// Acquiring variant of compareAndSwapN (see compareAndSwapBAcq).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8424 
8425 
8426 // ---------------------------------------------------------------------
8427 
8428 
8429 // BEGIN This section of the file is automatically generated. Do not edit --------------
8430 
8431 // Sundry CAS operations.  Note that release is always true,
8432 // regardless of the memory ordering of the CAS.  This is because we
8433 // need the volatile case to be sequentially consistent but there is
8434 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8435 // can't check the type of memory ordering here, so we always emit a
8436 // STLXR.
8437 
8438 // This section is generated from aarch64_ad_cas.m4
8439 
8440 
8441 
// Strong byte compare-and-exchange: $res receives the value witnessed
// in memory, sign-extended to int by the trailing sxtbw (cmpxchg yields
// a zero-extended byte).  TEMP_DEF keeps res distinct from the inputs.
// NOTE(review): "(byte, weak)" in the format is misleading — weak is
// false here; any fix belongs in aarch64_ad_cas.m4, not this file.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8457 
// Strong short compare-and-exchange: returns the witnessed value,
// sign-extended by sxthw.  NOTE(review): "(short, weak)" in the format
// is misleading — weak is false; fix belongs in aarch64_ad_cas.m4.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8473 
// Strong int compare-and-exchange: returns the witnessed value in $res.
// NOTE(review): "(int, weak)" in the format is misleading — weak is
// false; fix belongs in aarch64_ad_cas.m4.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8488 
// Strong long compare-and-exchange: returns the witnessed value.
// NOTE(review): "(long, weak)" in the format is misleading — weak is
// false; fix belongs in aarch64_ad_cas.m4.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8503 
// Strong narrow-oop compare-and-exchange: returns the witnessed value.
// NOTE(review): "(narrow oop, weak)" in the format is misleading —
// weak is false; fix belongs in aarch64_ad_cas.m4.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8518 
// Strong pointer compare-and-exchange: returns the witnessed value.
// NOTE(review): "(ptr, weak)" in the format is misleading — weak is
// false; fix belongs in aarch64_ad_cas.m4.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8533 
// Acquiring variant of compareAndExchangeB (acquire and release both
// true); selected when the CAS implements a volatile access.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8550 
// Acquiring variant of compareAndExchangeS.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8567 
8568 
// Acquiring variant of compareAndExchangeI.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8584 
// Acquiring variant of compareAndExchangeL.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8600 
8601 
// Acquiring variant of compareAndExchangeN.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8617 
// Acquiring variant of compareAndExchangeP.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8633 
// Weak byte CAS: may fail spuriously (no retry loop, noreg result from
// cmpxchg); $res gets 1 on success, 0 on failure via csetw.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8650 
// Weak short CAS; 0/1 success flag in $res (see weakCompareAndSwapB).
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8667 
// Weak int CAS; 0/1 success flag in $res (see weakCompareAndSwapB).
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8684 
// Weak long CAS; 0/1 success flag in $res (see weakCompareAndSwapB).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8701 
// Weak narrow-oop CAS; 0/1 success flag in $res.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8718 
// Weak pointer CAS; 0/1 success flag in $res.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8735 
// Acquiring variant of weakCompareAndSwapB.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8753 
// Acquiring variant of weakCompareAndSwapS.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8771 
// Acquiring variant of weakCompareAndSwapI.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8789 
// Acquiring variant of weakCompareAndSwapL.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8807 
8808 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8809   predicate(needs_acquiring_load_exclusive(n));
8810   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
8811   ins_cost(VOLATILE_REF_COST);
8812   effect(KILL cr);
8813   format %{
8814     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8815     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8816   %}
8817   ins_encode %{
8818     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8819                Assembler::word, /*acquire*/ true, /*release*/ true,
8820                /*weak*/ true, noreg);
8821     __ csetw($res$$Register, Assembler::EQ);
8822   %}
8823   ins_pipe(pipe_slow);
8824 %}
8825 
8826 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8827   predicate(needs_acquiring_load_exclusive(n));
8828   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
8829   ins_cost(VOLATILE_REF_COST);
8830   effect(KILL cr);
8831   format %{
8832     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
8833     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8834   %}
8835   ins_encode %{
8836     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8837                Assembler::xword, /*acquire*/ true, /*release*/ true,
8838                /*weak*/ true, noreg);
8839     __ csetw($res$$Register, Assembler::EQ);
8840   %}
8841   ins_pipe(pipe_slow);
8842 %}
8843 
8844 // END This section of the file is automatically generated. Do not edit --------------
8845 // ---------------------------------------------------------------------
8846 
// GetAndSet (atomic exchange) rules, relaxed memory-order forms.
// The old value at [$mem] is returned in $prev and $newv is stored;
// these use atomic_xchg/atomic_xchgw (no acquire semantics, so the
// cost reflects two volatile references).

// int flavour (32-bit exchange).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long flavour (64-bit exchange).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop flavour (compressed pointers are 32-bit, word exchange).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// ptr flavour (64-bit exchange).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8886 
// GetAndSet rules with acquire semantics.  Selected in preference to
// the relaxed rules above when needs_acquiring_load_exclusive(n)
// holds; they call the atomic_xchgal/atomic_xchgalw variants and cost
// a single VOLATILE_REF_COST because no separate barriers are needed.

// int flavour (32-bit exchange, acquiring).
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long flavour (64-bit exchange, acquiring).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop flavour (word exchange, acquiring).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// ptr flavour (64-bit exchange, acquiring).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8930 
8931 
// GetAndAdd (atomic fetch-and-add) rules, relaxed memory-order forms.
// Two orthogonal axes of variants:
//   - register increment vs. add/sub-range immediate (immLAddSub /
//     immIAddSub), which lets atomic_add encode the constant directly;
//   - normal vs. "_no_res": when the ideal node's result is unused
//     (n->as_LoadStore()->result_not_used()) we pass noreg as the
//     destination and skip materialising the old value, at a slightly
//     lower cost.

// long, register increment.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, register increment, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9015 
// GetAndAdd rules with acquire semantics, mirroring the eight relaxed
// rules above.  Selected when needs_acquiring_load_exclusive(n) holds
// (ANDed with result_not_used() for the "_no_res" forms); they call
// the atomic_addal/atomic_addalw variants and cost a single
// VOLATILE_REF_COST since no separate barriers are needed.

// long, register increment, acquiring.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, register increment, acquiring, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment, acquiring.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long, immediate increment, acquiring, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment, acquiring.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, register increment, acquiring, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment, acquiring.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int, immediate increment, acquiring, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9103 
9104 // Manifest a CmpL result in an integer register.
9105 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way long compare against a register, yielding -1/0/+1 in $dst.
// csetw sets $dst to 1 when the compare was NE (else 0); cnegw then
// negates $dst when the compare was LT, producing -1 for less-than,
// 0 for equal and +1 for greater-than.  Kills the condition flags.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9126 
// Three-way long compare against an add/sub-range immediate, yielding
// -1/0/+1 in $dst via the same csetw/cnegw sequence as cmpL3_reg_reg.
// Kills the condition flags.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant cannot be encoded as a subtract immediate,
    // so compare by adding its negation instead.  The immLAddSub
    // operand restricts the constant to the add/sub immediate range,
    // so negating con here cannot overflow.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // NE -> 1 else 0, then negate when LT, producing -1/0/+1.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9151 
9152 // ============================================================================
9153 // Conditional Move Instructions
9154 
9155 // n.b. we have identical rules for both a signed compare op (cmpOp)
9156 // and an unsigned compare op (cmpOpU). it would be nice if we could
9157 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
9159 // opclass does not live up to the COND_INTER interface of its
9160 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9162 // which throws a ShouldNotHappen. So, we have to provide two flavours
9163 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9164 
// Conditional move, int flavour, register/register.  Note the operand
// order in the encoding: cselw places $src2 in the selected-when-true
// position and $src1 in the selected-when-false position, matching the
// ideal (CMoveI (Binary cmp cr) (Binary src1 src2)).

// signed compare (cmpOp / rFlagsReg).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned compare (cmpOpU / rFlagsRegU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9196 
9197 // special cases where one arg is zero
9198 
9199 // n.b. this is selected in preference to the rule above because it
9200 // avoids loading constant 0 into a source register
9201 
9202 // TODO
9203 // we ought only to be able to cull one of these variants as the ideal
9204 // transforms ought always to order the zero consistently (to left/right?)
9205 
// Int cmove special cases where one source is the constant zero: the
// zero register (zr) is used directly in the appropriate csel operand
// slot, avoiding loading 0 into a scratch register.

// zero on the left, signed compare.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9269 
9270 // special case for creating a boolean 0 or 1
9271 
9272 // n.b. this is selected in preference to the rule above because it
9273 // avoids loading constants 0 and 1 into a source register
9274 
// Boolean materialisation special case: CMoveI between constants 1 and
// 0 becomes a single csincw of zr/zr, which produces 0 when the
// condition holds and 1 otherwise — no constant loads required.

// signed compare.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned compare.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9312 
// Conditional move, long flavour, register/register (64-bit csel);
// same operand ordering as the int rules: $src2 is selected when the
// condition holds, $src1 otherwise.

// signed compare.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9344 
9345 // special cases where one arg is zero
9346 
// Long cmove special cases where one source is the constant zero;
// zr is substituted into the relevant csel slot.

// zero on the right, signed compare.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, signed compare.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9410 
// Conditional move, pointer flavour, register/register (64-bit csel).

// signed compare.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned compare.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9442 
9443 // special cases where one arg is zero
9444 
// Pointer cmove special cases where one source is the null constant;
// zr is substituted into the relevant csel slot.

// null on the right, signed compare.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the right, unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left, signed compare.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left, unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9508 
// Conditional move, narrow oop flavour, register/register.  Compressed
// pointers are 32-bit, hence the word-sized cselw.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9524 
// Conditional move, narrow oop flavour, register/register, unsigned
// compare (cmpOpU / rFlagsRegU).  The format comment previously said
// "signed" — fixed to "unsigned" to match the rule's compare kind and
// the convention of every other cmpOpU rule in this file.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9540 
9541 // special cases where one arg is zero
9542 
// Narrow oop cmove special cases where one source is the compressed
// null constant; zr is substituted into the relevant cselw slot.

// null on the right, signed compare.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the right, unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left, signed compare.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left, unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9606 
// Conditional move, float flavour, via fcsels on FP registers.  Same
// operand ordering as the integer rules: $src2 is selected when the
// condition holds, $src1 otherwise.

// signed compare.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// unsigned compare.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9642 
// Conditional move, double flavour, via fcseld on FP registers; $src2
// is selected when the condition holds, $src1 otherwise.  The format
// comment previously said "cmove float" — fixed to "cmove double" to
// match the fcseld (double) operation and the CMoveD ideal node.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9660 
// Conditional move, double flavour, unsigned compare, via fcseld.  The
// format comment previously said "cmove float" — fixed to "cmove
// double" to match the fcseld (double) operation and the CMoveD node.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9678 
9679 // ============================================================================
9680 // Arithmetic Instructions
9681 //
9682 
9683 // Integer Addition
9684 
9685 // TODO
9686 // these currently employ operations which do not set CR and hence are
9687 // not flagged as killing CR but we would like to isolate the cases
9688 // where we want to set flags from those where we don't. need to work
9689 // out how to do that.
9690 
// 32-bit integer addition of two registers: dst = src1 + src2 (addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9705 
// 32-bit integer addition of a register and an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9719 
// As addI_reg_imm, but the register input is the low 32 bits of a long
// (the ConvL2I is free — addw reads only the W view of src1).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9733 
// Pointer Addition
// dst = src1 + src2, where src1 is a pointer and src2 a 64-bit offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9749 
// Pointer plus an int offset; the ConvI2L is folded into the add's
// sxtw operand extension, so no separate sign-extend is emitted.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9764 
// Pointer plus a long offset pre-scaled by a shift; folded into a single
// lea with an lsl-scaled register addressing mode.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9779 
// Pointer plus an int offset that is sign-extended then scaled; folds
// ConvI2L and the shift into one lea with an sxtw-scaled addressing mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9794 
// Combined int->long conversion and left shift, emitted as one sbfiz:
// sign-extend src into dst starting at bit position (scale & 63).
// The inserted field width is capped at 32 (MIN) because the source
// value is only a 32-bit int.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9809 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9826 
// Long Addition
// 64-bit addition of two registers: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9843 
// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9858 
// Integer Subtraction
// 32-bit subtraction of two registers: dst = src1 - src2 (subw).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9874 
// Immediate Subtraction
// 32-bit subtraction of an add/sub-encodable immediate from a register.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9889 
// Long Subtraction
// 64-bit subtraction of two registers: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9906 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // format previously read "sub$dst" with no separator; padded like the
  // other sub rules so the debug listing stays readable.
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9921 
// Integer Negation (special case for sub)

// Matches 0 - src and emits negw: dst = -src (32-bit).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9937 
// Long Negation

// Matches 0 - src and emits neg: dst = -src (64-bit).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9953 
// Integer Multiply

// 32-bit multiply of two registers: dst = src1 * src2 (mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9970 
// Widening 32x32->64 signed multiply: both ConvI2L nodes and the MulL
// are folded into a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9985 
// Long Multiply

// 64-bit multiply of two registers: dst = src1 * src2 (low 64 bits).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10002 
// High half of a 64x64 signed multiply (upper 64 bits of the 128-bit
// product), emitted as smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // dropped a stray trailing comma that previously appeared before the
  // tab in the debug listing
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10018 
// Combined Integer Multiply & Add/Sub

// Fuses AddI(src3, MulI(src1, src2)) into one maddw:
// dst = src3 + src1 * src2 (32-bit).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // format now shows the 32-bit mnemonic actually emitted (maddw, not madd)
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10036 
// Fuses SubI(src3, MulI(src1, src2)) into one msubw:
// dst = src3 - src1 * src2 (32-bit).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // format now shows the 32-bit mnemonic actually emitted (msubw, not msub)
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10052 
// Combined Integer Multiply & Neg

// Fuses a negate on either multiply operand into one mnegw:
// dst = -(src1 * src2) (32-bit).
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // format now shows the 32-bit mnemonic actually emitted (mnegw, not mneg)
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10070 
// Combined Long Multiply & Add/Sub

// Fuses AddL(src3, MulL(src1, src2)) into one madd:
// dst = src3 + src1 * src2 (64-bit).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10088 
// Fuses SubL(src3, MulL(src1, src2)) into one msub:
// dst = src3 - src1 * src2 (64-bit).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10104 
// Combined Long Multiply & Neg

// Fuses a negate on either multiply operand into one mneg:
// dst = -(src1 * src2) (64-bit).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10122 
// Integer Divide

// 32-bit signed division via sdivw; the high cost reflects divide latency.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10134 
// (src1 >> 31) >>> 31 reduces to extracting the sign bit, so a single
// unsigned shift right by 31 suffices: dst = src1 >>> 31 (0 or 1).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10144 
// src + ((src >> 31) >>> 31) adds the sign bit to src — the rounding
// adjustment used before an arithmetic shift when dividing by a power
// of two. Folded into one addw with an LSR-shifted second operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10158 
// Long Divide

// 64-bit signed division via sdiv; the high cost reflects divide latency.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10170 
// Long analogue of signExtract: (src1 >> 63) >>> 63 is just the sign
// bit, so emit a single unsigned shift right by 63 (result 0 or 1).
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10180 
// Long analogue of div2Round: src + ((src >> 63) >>> 63) adds the sign
// bit to src, the rounding step for a signed divide by a power of two.
// Folded into one add with an LSR-shifted second operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // format now carries the LSR annotation, matching div2Round above
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10194 
// Integer Remainder

// ModI expands to divide-then-multiply-subtract:
// rscratch1 = src1 / src2; dst = src1 - rscratch1 * src2.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // second line previously read "msubw($dst, ..." with a stray unbalanced
  // paren; cleaned up to plain assembly syntax
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10207 
// Long Remainder

// ModL expands to divide-then-multiply-subtract:
// rscratch1 = src1 / src2; dst = src1 - rscratch1 * src2.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // second line previously read "msub($dst, ..." with a stray unbalanced
  // paren and used "\n" where modI used "\n\t"; made consistent
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10220 
// Integer Shifts

// Shift Left Register
// Variable 32-bit left shift: dst = src1 << src2 (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10238 
// Shift Left Immediate
// 32-bit left shift by constant; count is masked to 5 bits (& 0x1f).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10254 
// Shift Right Logical Register
// Variable 32-bit unsigned right shift: dst = src1 >>> src2 (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10270 
// Shift Right Logical Immediate
// 32-bit unsigned right shift by constant; count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10286 
// Shift Right Arithmetic Register
// Variable 32-bit signed right shift: dst = src1 >> src2 (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10302 
// Shift Right Arithmetic Immediate
// 32-bit signed right shift by constant; count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10318 
10319 // Combined Int Mask and Right Shift (using UBFM)
10320 // TODO
10321 
// Long Shifts

// Shift Left Register
// Variable 64-bit left shift: dst = src1 << src2 (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10339 
// Shift Left Immediate
// 64-bit left shift by constant; count is masked to 6 bits (& 0x3f).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10355 
// Shift Right Logical Register
// Variable 64-bit unsigned right shift: dst = src1 >>> src2 (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10371 
// Shift Right Logical Immediate
// 64-bit unsigned right shift by constant; count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10387 
// A special-case pattern for card table stores.
// Shifts the raw bits of a pointer (CastP2X) right by a constant;
// count masked to 6 bits.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10403 
// Shift Right Arithmetic Register
// Variable 64-bit signed right shift: dst = src1 >> src2 (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10419 
// Shift Right Arithmetic Immediate
// 64-bit signed right shift by constant; count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10435 
10436 // BEGIN This section of the file is automatically generated. Do not edit --------------
10437 
10438 instruct regL_not_reg(iRegLNoSp dst,
10439                          iRegL src1, immL_M1 m1,
10440                          rFlagsReg cr) %{
10441   match(Set dst (XorL src1 m1));
10442   ins_cost(INSN_COST);
10443   format %{ "eon  $dst, $src1, zr" %}
10444 
10445   ins_encode %{
10446     __ eon(as_Register($dst$$reg),
10447               as_Register($src1$$reg),
10448               zr,
10449               Assembler::LSL, 0);
10450   %}
10451 
10452   ins_pipe(ialu_reg);
10453 %}
10454 instruct regI_not_reg(iRegINoSp dst,
10455                          iRegIorL2I src1, immI_M1 m1,
10456                          rFlagsReg cr) %{
10457   match(Set dst (XorI src1 m1));
10458   ins_cost(INSN_COST);
10459   format %{ "eonw  $dst, $src1, zr" %}
10460 
10461   ins_encode %{
10462     __ eonw(as_Register($dst$$reg),
10463               as_Register($src1$$reg),
10464               zr,
10465               Assembler::LSL, 0);
10466   %}
10467 
10468   ins_pipe(ialu_reg);
10469 %}
10470 
10471 instruct AndI_reg_not_reg(iRegINoSp dst,
10472                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10473                          rFlagsReg cr) %{
10474   match(Set dst (AndI src1 (XorI src2 m1)));
10475   ins_cost(INSN_COST);
10476   format %{ "bicw  $dst, $src1, $src2" %}
10477 
10478   ins_encode %{
10479     __ bicw(as_Register($dst$$reg),
10480               as_Register($src1$$reg),
10481               as_Register($src2$$reg),
10482               Assembler::LSL, 0);
10483   %}
10484 
10485   ins_pipe(ialu_reg_reg);
10486 %}
10487 
10488 instruct AndL_reg_not_reg(iRegLNoSp dst,
10489                          iRegL src1, iRegL src2, immL_M1 m1,
10490                          rFlagsReg cr) %{
10491   match(Set dst (AndL src1 (XorL src2 m1)));
10492   ins_cost(INSN_COST);
10493   format %{ "bic  $dst, $src1, $src2" %}
10494 
10495   ins_encode %{
10496     __ bic(as_Register($dst$$reg),
10497               as_Register($src1$$reg),
10498               as_Register($src2$$reg),
10499               Assembler::LSL, 0);
10500   %}
10501 
10502   ins_pipe(ialu_reg_reg);
10503 %}
10504 
10505 instruct OrI_reg_not_reg(iRegINoSp dst,
10506                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10507                          rFlagsReg cr) %{
10508   match(Set dst (OrI src1 (XorI src2 m1)));
10509   ins_cost(INSN_COST);
10510   format %{ "ornw  $dst, $src1, $src2" %}
10511 
10512   ins_encode %{
10513     __ ornw(as_Register($dst$$reg),
10514               as_Register($src1$$reg),
10515               as_Register($src2$$reg),
10516               Assembler::LSL, 0);
10517   %}
10518 
10519   ins_pipe(ialu_reg_reg);
10520 %}
10521 
10522 instruct OrL_reg_not_reg(iRegLNoSp dst,
10523                          iRegL src1, iRegL src2, immL_M1 m1,
10524                          rFlagsReg cr) %{
10525   match(Set dst (OrL src1 (XorL src2 m1)));
10526   ins_cost(INSN_COST);
10527   format %{ "orn  $dst, $src1, $src2" %}
10528 
10529   ins_encode %{
10530     __ orn(as_Register($dst$$reg),
10531               as_Register($src1$$reg),
10532               as_Register($src2$$reg),
10533               Assembler::LSL, 0);
10534   %}
10535 
10536   ins_pipe(ialu_reg_reg);
10537 %}
10538 
10539 instruct XorI_reg_not_reg(iRegINoSp dst,
10540                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10541                          rFlagsReg cr) %{
10542   match(Set dst (XorI m1 (XorI src2 src1)));
10543   ins_cost(INSN_COST);
10544   format %{ "eonw  $dst, $src1, $src2" %}
10545 
10546   ins_encode %{
10547     __ eonw(as_Register($dst$$reg),
10548               as_Register($src1$$reg),
10549               as_Register($src2$$reg),
10550               Assembler::LSL, 0);
10551   %}
10552 
10553   ins_pipe(ialu_reg_reg);
10554 %}
10555 
10556 instruct XorL_reg_not_reg(iRegLNoSp dst,
10557                          iRegL src1, iRegL src2, immL_M1 m1,
10558                          rFlagsReg cr) %{
10559   match(Set dst (XorL m1 (XorL src2 src1)));
10560   ins_cost(INSN_COST);
10561   format %{ "eon  $dst, $src1, $src2" %}
10562 
10563   ins_encode %{
10564     __ eon(as_Register($dst$$reg),
10565               as_Register($src1$$reg),
10566               as_Register($src2$$reg),
10567               Assembler::LSL, 0);
10568   %}
10569 
10570   ins_pipe(ialu_reg_reg);
10571 %}
10572 
10573 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10574                          iRegIorL2I src1, iRegIorL2I src2,
10575                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10576   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10577   ins_cost(1.9 * INSN_COST);
10578   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10579 
10580   ins_encode %{
10581     __ bicw(as_Register($dst$$reg),
10582               as_Register($src1$$reg),
10583               as_Register($src2$$reg),
10584               Assembler::LSR,
10585               $src3$$constant & 0x1f);
10586   %}
10587 
10588   ins_pipe(ialu_reg_reg_shift);
10589 %}
10590 
10591 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10592                          iRegL src1, iRegL src2,
10593                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10594   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10595   ins_cost(1.9 * INSN_COST);
10596   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10597 
10598   ins_encode %{
10599     __ bic(as_Register($dst$$reg),
10600               as_Register($src1$$reg),
10601               as_Register($src2$$reg),
10602               Assembler::LSR,
10603               $src3$$constant & 0x3f);
10604   %}
10605 
10606   ins_pipe(ialu_reg_reg_shift);
10607 %}
10608 
10609 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10610                          iRegIorL2I src1, iRegIorL2I src2,
10611                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10612   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10613   ins_cost(1.9 * INSN_COST);
10614   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10615 
10616   ins_encode %{
10617     __ bicw(as_Register($dst$$reg),
10618               as_Register($src1$$reg),
10619               as_Register($src2$$reg),
10620               Assembler::ASR,
10621               $src3$$constant & 0x1f);
10622   %}
10623 
10624   ins_pipe(ialu_reg_reg_shift);
10625 %}
10626 
10627 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10628                          iRegL src1, iRegL src2,
10629                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10630   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10631   ins_cost(1.9 * INSN_COST);
10632   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10633 
10634   ins_encode %{
10635     __ bic(as_Register($dst$$reg),
10636               as_Register($src1$$reg),
10637               as_Register($src2$$reg),
10638               Assembler::ASR,
10639               $src3$$constant & 0x3f);
10640   %}
10641 
10642   ins_pipe(ialu_reg_reg_shift);
10643 %}
10644 
10645 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10646                          iRegIorL2I src1, iRegIorL2I src2,
10647                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10648   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10649   ins_cost(1.9 * INSN_COST);
10650   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10651 
10652   ins_encode %{
10653     __ bicw(as_Register($dst$$reg),
10654               as_Register($src1$$reg),
10655               as_Register($src2$$reg),
10656               Assembler::LSL,
10657               $src3$$constant & 0x1f);
10658   %}
10659 
10660   ins_pipe(ialu_reg_reg_shift);
10661 %}
10662 
10663 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10664                          iRegL src1, iRegL src2,
10665                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10666   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10667   ins_cost(1.9 * INSN_COST);
10668   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10669 
10670   ins_encode %{
10671     __ bic(as_Register($dst$$reg),
10672               as_Register($src1$$reg),
10673               as_Register($src2$$reg),
10674               Assembler::LSL,
10675               $src3$$constant & 0x3f);
10676   %}
10677 
10678   ins_pipe(ialu_reg_reg_shift);
10679 %}
10680 
10681 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10682                          iRegIorL2I src1, iRegIorL2I src2,
10683                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10684   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10685   ins_cost(1.9 * INSN_COST);
10686   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10687 
10688   ins_encode %{
10689     __ eonw(as_Register($dst$$reg),
10690               as_Register($src1$$reg),
10691               as_Register($src2$$reg),
10692               Assembler::LSR,
10693               $src3$$constant & 0x1f);
10694   %}
10695 
10696   ins_pipe(ialu_reg_reg_shift);
10697 %}
10698 
10699 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10700                          iRegL src1, iRegL src2,
10701                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10702   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10703   ins_cost(1.9 * INSN_COST);
10704   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10705 
10706   ins_encode %{
10707     __ eon(as_Register($dst$$reg),
10708               as_Register($src1$$reg),
10709               as_Register($src2$$reg),
10710               Assembler::LSR,
10711               $src3$$constant & 0x3f);
10712   %}
10713 
10714   ins_pipe(ialu_reg_reg_shift);
10715 %}
10716 
10717 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
10718                          iRegIorL2I src1, iRegIorL2I src2,
10719                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10720   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
10721   ins_cost(1.9 * INSN_COST);
10722   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
10723 
10724   ins_encode %{
10725     __ eonw(as_Register($dst$$reg),
10726               as_Register($src1$$reg),
10727               as_Register($src2$$reg),
10728               Assembler::ASR,
10729               $src3$$constant & 0x1f);
10730   %}
10731 
10732   ins_pipe(ialu_reg_reg_shift);
10733 %}
10734 
10735 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
10736                          iRegL src1, iRegL src2,
10737                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10738   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
10739   ins_cost(1.9 * INSN_COST);
10740   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
10741 
10742   ins_encode %{
10743     __ eon(as_Register($dst$$reg),
10744               as_Register($src1$$reg),
10745               as_Register($src2$$reg),
10746               Assembler::ASR,
10747               $src3$$constant & 0x3f);
10748   %}
10749 
10750   ins_pipe(ialu_reg_reg_shift);
10751 %}
10752 
10753 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
10754                          iRegIorL2I src1, iRegIorL2I src2,
10755                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10756   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
10757   ins_cost(1.9 * INSN_COST);
10758   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
10759 
10760   ins_encode %{
10761     __ eonw(as_Register($dst$$reg),
10762               as_Register($src1$$reg),
10763               as_Register($src2$$reg),
10764               Assembler::LSL,
10765               $src3$$constant & 0x1f);
10766   %}
10767 
10768   ins_pipe(ialu_reg_reg_shift);
10769 %}
10770 
10771 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
10772                          iRegL src1, iRegL src2,
10773                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10774   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
10775   ins_cost(1.9 * INSN_COST);
10776   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
10777 
10778   ins_encode %{
10779     __ eon(as_Register($dst$$reg),
10780               as_Register($src1$$reg),
10781               as_Register($src2$$reg),
10782               Assembler::LSL,
10783               $src3$$constant & 0x3f);
10784   %}
10785 
10786   ins_pipe(ialu_reg_reg_shift);
10787 %}
10788 
10789 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
10790                          iRegIorL2I src1, iRegIorL2I src2,
10791                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10792   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
10793   ins_cost(1.9 * INSN_COST);
10794   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
10795 
10796   ins_encode %{
10797     __ ornw(as_Register($dst$$reg),
10798               as_Register($src1$$reg),
10799               as_Register($src2$$reg),
10800               Assembler::LSR,
10801               $src3$$constant & 0x1f);
10802   %}
10803 
10804   ins_pipe(ialu_reg_reg_shift);
10805 %}
10806 
10807 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
10808                          iRegL src1, iRegL src2,
10809                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10810   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
10811   ins_cost(1.9 * INSN_COST);
10812   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
10813 
10814   ins_encode %{
10815     __ orn(as_Register($dst$$reg),
10816               as_Register($src1$$reg),
10817               as_Register($src2$$reg),
10818               Assembler::LSR,
10819               $src3$$constant & 0x3f);
10820   %}
10821 
10822   ins_pipe(ialu_reg_reg_shift);
10823 %}
10824 
10825 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10826                          iRegIorL2I src1, iRegIorL2I src2,
10827                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10828   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10829   ins_cost(1.9 * INSN_COST);
10830   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10831 
10832   ins_encode %{
10833     __ ornw(as_Register($dst$$reg),
10834               as_Register($src1$$reg),
10835               as_Register($src2$$reg),
10836               Assembler::ASR,
10837               $src3$$constant & 0x1f);
10838   %}
10839 
10840   ins_pipe(ialu_reg_reg_shift);
10841 %}
10842 
10843 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10844                          iRegL src1, iRegL src2,
10845                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10846   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10847   ins_cost(1.9 * INSN_COST);
10848   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10849 
10850   ins_encode %{
10851     __ orn(as_Register($dst$$reg),
10852               as_Register($src1$$reg),
10853               as_Register($src2$$reg),
10854               Assembler::ASR,
10855               $src3$$constant & 0x3f);
10856   %}
10857 
10858   ins_pipe(ialu_reg_reg_shift);
10859 %}
10860 
10861 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10862                          iRegIorL2I src1, iRegIorL2I src2,
10863                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10864   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10865   ins_cost(1.9 * INSN_COST);
10866   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10867 
10868   ins_encode %{
10869     __ ornw(as_Register($dst$$reg),
10870               as_Register($src1$$reg),
10871               as_Register($src2$$reg),
10872               Assembler::LSL,
10873               $src3$$constant & 0x1f);
10874   %}
10875 
10876   ins_pipe(ialu_reg_reg_shift);
10877 %}
10878 
10879 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10880                          iRegL src1, iRegL src2,
10881                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10882   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10883   ins_cost(1.9 * INSN_COST);
10884   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10885 
10886   ins_encode %{
10887     __ orn(as_Register($dst$$reg),
10888               as_Register($src1$$reg),
10889               as_Register($src2$$reg),
10890               Assembler::LSL,
10891               $src3$$constant & 0x3f);
10892   %}
10893 
10894   ins_pipe(ialu_reg_reg_shift);
10895 %}
10896 
10897 instruct AndI_reg_URShift_reg(iRegINoSp dst,
10898                          iRegIorL2I src1, iRegIorL2I src2,
10899                          immI src3, rFlagsReg cr) %{
10900   match(Set dst (AndI src1 (URShiftI src2 src3)));
10901 
10902   ins_cost(1.9 * INSN_COST);
10903   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
10904 
10905   ins_encode %{
10906     __ andw(as_Register($dst$$reg),
10907               as_Register($src1$$reg),
10908               as_Register($src2$$reg),
10909               Assembler::LSR,
10910               $src3$$constant & 0x1f);
10911   %}
10912 
10913   ins_pipe(ialu_reg_reg_shift);
10914 %}
10915 
10916 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
10917                          iRegL src1, iRegL src2,
10918                          immI src3, rFlagsReg cr) %{
10919   match(Set dst (AndL src1 (URShiftL src2 src3)));
10920 
10921   ins_cost(1.9 * INSN_COST);
10922   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
10923 
10924   ins_encode %{
10925     __ andr(as_Register($dst$$reg),
10926               as_Register($src1$$reg),
10927               as_Register($src2$$reg),
10928               Assembler::LSR,
10929               $src3$$constant & 0x3f);
10930   %}
10931 
10932   ins_pipe(ialu_reg_reg_shift);
10933 %}
10934 
10935 instruct AndI_reg_RShift_reg(iRegINoSp dst,
10936                          iRegIorL2I src1, iRegIorL2I src2,
10937                          immI src3, rFlagsReg cr) %{
10938   match(Set dst (AndI src1 (RShiftI src2 src3)));
10939 
10940   ins_cost(1.9 * INSN_COST);
10941   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
10942 
10943   ins_encode %{
10944     __ andw(as_Register($dst$$reg),
10945               as_Register($src1$$reg),
10946               as_Register($src2$$reg),
10947               Assembler::ASR,
10948               $src3$$constant & 0x1f);
10949   %}
10950 
10951   ins_pipe(ialu_reg_reg_shift);
10952 %}
10953 
10954 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
10955                          iRegL src1, iRegL src2,
10956                          immI src3, rFlagsReg cr) %{
10957   match(Set dst (AndL src1 (RShiftL src2 src3)));
10958 
10959   ins_cost(1.9 * INSN_COST);
10960   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
10961 
10962   ins_encode %{
10963     __ andr(as_Register($dst$$reg),
10964               as_Register($src1$$reg),
10965               as_Register($src2$$reg),
10966               Assembler::ASR,
10967               $src3$$constant & 0x3f);
10968   %}
10969 
10970   ins_pipe(ialu_reg_reg_shift);
10971 %}
10972 
10973 instruct AndI_reg_LShift_reg(iRegINoSp dst,
10974                          iRegIorL2I src1, iRegIorL2I src2,
10975                          immI src3, rFlagsReg cr) %{
10976   match(Set dst (AndI src1 (LShiftI src2 src3)));
10977 
10978   ins_cost(1.9 * INSN_COST);
10979   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
10980 
10981   ins_encode %{
10982     __ andw(as_Register($dst$$reg),
10983               as_Register($src1$$reg),
10984               as_Register($src2$$reg),
10985               Assembler::LSL,
10986               $src3$$constant & 0x1f);
10987   %}
10988 
10989   ins_pipe(ialu_reg_reg_shift);
10990 %}
10991 
10992 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
10993                          iRegL src1, iRegL src2,
10994                          immI src3, rFlagsReg cr) %{
10995   match(Set dst (AndL src1 (LShiftL src2 src3)));
10996 
10997   ins_cost(1.9 * INSN_COST);
10998   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
10999 
11000   ins_encode %{
11001     __ andr(as_Register($dst$$reg),
11002               as_Register($src1$$reg),
11003               as_Register($src2$$reg),
11004               Assembler::LSL,
11005               $src3$$constant & 0x3f);
11006   %}
11007 
11008   ins_pipe(ialu_reg_reg_shift);
11009 %}
11010 
11011 instruct XorI_reg_URShift_reg(iRegINoSp dst,
11012                          iRegIorL2I src1, iRegIorL2I src2,
11013                          immI src3, rFlagsReg cr) %{
11014   match(Set dst (XorI src1 (URShiftI src2 src3)));
11015 
11016   ins_cost(1.9 * INSN_COST);
11017   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
11018 
11019   ins_encode %{
11020     __ eorw(as_Register($dst$$reg),
11021               as_Register($src1$$reg),
11022               as_Register($src2$$reg),
11023               Assembler::LSR,
11024               $src3$$constant & 0x1f);
11025   %}
11026 
11027   ins_pipe(ialu_reg_reg_shift);
11028 %}
11029 
11030 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
11031                          iRegL src1, iRegL src2,
11032                          immI src3, rFlagsReg cr) %{
11033   match(Set dst (XorL src1 (URShiftL src2 src3)));
11034 
11035   ins_cost(1.9 * INSN_COST);
11036   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
11037 
11038   ins_encode %{
11039     __ eor(as_Register($dst$$reg),
11040               as_Register($src1$$reg),
11041               as_Register($src2$$reg),
11042               Assembler::LSR,
11043               $src3$$constant & 0x3f);
11044   %}
11045 
11046   ins_pipe(ialu_reg_reg_shift);
11047 %}
11048 
11049 instruct XorI_reg_RShift_reg(iRegINoSp dst,
11050                          iRegIorL2I src1, iRegIorL2I src2,
11051                          immI src3, rFlagsReg cr) %{
11052   match(Set dst (XorI src1 (RShiftI src2 src3)));
11053 
11054   ins_cost(1.9 * INSN_COST);
11055   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
11056 
11057   ins_encode %{
11058     __ eorw(as_Register($dst$$reg),
11059               as_Register($src1$$reg),
11060               as_Register($src2$$reg),
11061               Assembler::ASR,
11062               $src3$$constant & 0x1f);
11063   %}
11064 
11065   ins_pipe(ialu_reg_reg_shift);
11066 %}
11067 
11068 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
11069                          iRegL src1, iRegL src2,
11070                          immI src3, rFlagsReg cr) %{
11071   match(Set dst (XorL src1 (RShiftL src2 src3)));
11072 
11073   ins_cost(1.9 * INSN_COST);
11074   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
11075 
11076   ins_encode %{
11077     __ eor(as_Register($dst$$reg),
11078               as_Register($src1$$reg),
11079               as_Register($src2$$reg),
11080               Assembler::ASR,
11081               $src3$$constant & 0x3f);
11082   %}
11083 
11084   ins_pipe(ialu_reg_reg_shift);
11085 %}
11086 
11087 instruct XorI_reg_LShift_reg(iRegINoSp dst,
11088                          iRegIorL2I src1, iRegIorL2I src2,
11089                          immI src3, rFlagsReg cr) %{
11090   match(Set dst (XorI src1 (LShiftI src2 src3)));
11091 
11092   ins_cost(1.9 * INSN_COST);
11093   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
11094 
11095   ins_encode %{
11096     __ eorw(as_Register($dst$$reg),
11097               as_Register($src1$$reg),
11098               as_Register($src2$$reg),
11099               Assembler::LSL,
11100               $src3$$constant & 0x1f);
11101   %}
11102 
11103   ins_pipe(ialu_reg_reg_shift);
11104 %}
11105 
11106 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11107                          iRegL src1, iRegL src2,
11108                          immI src3, rFlagsReg cr) %{
11109   match(Set dst (XorL src1 (LShiftL src2 src3)));
11110 
11111   ins_cost(1.9 * INSN_COST);
11112   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11113 
11114   ins_encode %{
11115     __ eor(as_Register($dst$$reg),
11116               as_Register($src1$$reg),
11117               as_Register($src2$$reg),
11118               Assembler::LSL,
11119               $src3$$constant & 0x3f);
11120   %}
11121 
11122   ins_pipe(ialu_reg_reg_shift);
11123 %}
11124 
11125 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11126                          iRegIorL2I src1, iRegIorL2I src2,
11127                          immI src3, rFlagsReg cr) %{
11128   match(Set dst (OrI src1 (URShiftI src2 src3)));
11129 
11130   ins_cost(1.9 * INSN_COST);
11131   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11132 
11133   ins_encode %{
11134     __ orrw(as_Register($dst$$reg),
11135               as_Register($src1$$reg),
11136               as_Register($src2$$reg),
11137               Assembler::LSR,
11138               $src3$$constant & 0x1f);
11139   %}
11140 
11141   ins_pipe(ialu_reg_reg_shift);
11142 %}
11143 
11144 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11145                          iRegL src1, iRegL src2,
11146                          immI src3, rFlagsReg cr) %{
11147   match(Set dst (OrL src1 (URShiftL src2 src3)));
11148 
11149   ins_cost(1.9 * INSN_COST);
11150   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11151 
11152   ins_encode %{
11153     __ orr(as_Register($dst$$reg),
11154               as_Register($src1$$reg),
11155               as_Register($src2$$reg),
11156               Assembler::LSR,
11157               $src3$$constant & 0x3f);
11158   %}
11159 
11160   ins_pipe(ialu_reg_reg_shift);
11161 %}
11162 
11163 instruct OrI_reg_RShift_reg(iRegINoSp dst,
11164                          iRegIorL2I src1, iRegIorL2I src2,
11165                          immI src3, rFlagsReg cr) %{
11166   match(Set dst (OrI src1 (RShiftI src2 src3)));
11167 
11168   ins_cost(1.9 * INSN_COST);
11169   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
11170 
11171   ins_encode %{
11172     __ orrw(as_Register($dst$$reg),
11173               as_Register($src1$$reg),
11174               as_Register($src2$$reg),
11175               Assembler::ASR,
11176               $src3$$constant & 0x1f);
11177   %}
11178 
11179   ins_pipe(ialu_reg_reg_shift);
11180 %}
11181 
11182 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11183                          iRegL src1, iRegL src2,
11184                          immI src3, rFlagsReg cr) %{
11185   match(Set dst (OrL src1 (RShiftL src2 src3)));
11186 
11187   ins_cost(1.9 * INSN_COST);
11188   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11189 
11190   ins_encode %{
11191     __ orr(as_Register($dst$$reg),
11192               as_Register($src1$$reg),
11193               as_Register($src2$$reg),
11194               Assembler::ASR,
11195               $src3$$constant & 0x3f);
11196   %}
11197 
11198   ins_pipe(ialu_reg_reg_shift);
11199 %}
11200 
11201 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11202                          iRegIorL2I src1, iRegIorL2I src2,
11203                          immI src3, rFlagsReg cr) %{
11204   match(Set dst (OrI src1 (LShiftI src2 src3)));
11205 
11206   ins_cost(1.9 * INSN_COST);
11207   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11208 
11209   ins_encode %{
11210     __ orrw(as_Register($dst$$reg),
11211               as_Register($src1$$reg),
11212               as_Register($src2$$reg),
11213               Assembler::LSL,
11214               $src3$$constant & 0x1f);
11215   %}
11216 
11217   ins_pipe(ialu_reg_reg_shift);
11218 %}
11219 
11220 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11221                          iRegL src1, iRegL src2,
11222                          immI src3, rFlagsReg cr) %{
11223   match(Set dst (OrL src1 (LShiftL src2 src3)));
11224 
11225   ins_cost(1.9 * INSN_COST);
11226   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11227 
11228   ins_encode %{
11229     __ orr(as_Register($dst$$reg),
11230               as_Register($src1$$reg),
11231               as_Register($src2$$reg),
11232               Assembler::LSL,
11233               $src3$$constant & 0x3f);
11234   %}
11235 
11236   ins_pipe(ialu_reg_reg_shift);
11237 %}
11238 
11239 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11240                          iRegIorL2I src1, iRegIorL2I src2,
11241                          immI src3, rFlagsReg cr) %{
11242   match(Set dst (AddI src1 (URShiftI src2 src3)));
11243 
11244   ins_cost(1.9 * INSN_COST);
11245   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11246 
11247   ins_encode %{
11248     __ addw(as_Register($dst$$reg),
11249               as_Register($src1$$reg),
11250               as_Register($src2$$reg),
11251               Assembler::LSR,
11252               $src3$$constant & 0x1f);
11253   %}
11254 
11255   ins_pipe(ialu_reg_reg_shift);
11256 %}
11257 
11258 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11259                          iRegL src1, iRegL src2,
11260                          immI src3, rFlagsReg cr) %{
11261   match(Set dst (AddL src1 (URShiftL src2 src3)));
11262 
11263   ins_cost(1.9 * INSN_COST);
11264   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11265 
11266   ins_encode %{
11267     __ add(as_Register($dst$$reg),
11268               as_Register($src1$$reg),
11269               as_Register($src2$$reg),
11270               Assembler::LSR,
11271               $src3$$constant & 0x3f);
11272   %}
11273 
11274   ins_pipe(ialu_reg_reg_shift);
11275 %}
11276 
11277 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11278                          iRegIorL2I src1, iRegIorL2I src2,
11279                          immI src3, rFlagsReg cr) %{
11280   match(Set dst (AddI src1 (RShiftI src2 src3)));
11281 
11282   ins_cost(1.9 * INSN_COST);
11283   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11284 
11285   ins_encode %{
11286     __ addw(as_Register($dst$$reg),
11287               as_Register($src1$$reg),
11288               as_Register($src2$$reg),
11289               Assembler::ASR,
11290               $src3$$constant & 0x1f);
11291   %}
11292 
11293   ins_pipe(ialu_reg_reg_shift);
11294 %}
11295 
11296 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
11297                          iRegL src1, iRegL src2,
11298                          immI src3, rFlagsReg cr) %{
11299   match(Set dst (AddL src1 (RShiftL src2 src3)));
11300 
11301   ins_cost(1.9 * INSN_COST);
11302   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
11303 
11304   ins_encode %{
11305     __ add(as_Register($dst$$reg),
11306               as_Register($src1$$reg),
11307               as_Register($src2$$reg),
11308               Assembler::ASR,
11309               $src3$$constant & 0x3f);
11310   %}
11311 
11312   ins_pipe(ialu_reg_reg_shift);
11313 %}
11314 
11315 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11316                          iRegIorL2I src1, iRegIorL2I src2,
11317                          immI src3, rFlagsReg cr) %{
11318   match(Set dst (AddI src1 (LShiftI src2 src3)));
11319 
11320   ins_cost(1.9 * INSN_COST);
11321   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11322 
11323   ins_encode %{
11324     __ addw(as_Register($dst$$reg),
11325               as_Register($src1$$reg),
11326               as_Register($src2$$reg),
11327               Assembler::LSL,
11328               $src3$$constant & 0x1f);
11329   %}
11330 
11331   ins_pipe(ialu_reg_reg_shift);
11332 %}
11333 
11334 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11335                          iRegL src1, iRegL src2,
11336                          immI src3, rFlagsReg cr) %{
11337   match(Set dst (AddL src1 (LShiftL src2 src3)));
11338 
11339   ins_cost(1.9 * INSN_COST);
11340   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11341 
11342   ins_encode %{
11343     __ add(as_Register($dst$$reg),
11344               as_Register($src1$$reg),
11345               as_Register($src2$$reg),
11346               Assembler::LSL,
11347               $src3$$constant & 0x3f);
11348   %}
11349 
11350   ins_pipe(ialu_reg_reg_shift);
11351 %}
11352 
11353 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11354                          iRegIorL2I src1, iRegIorL2I src2,
11355                          immI src3, rFlagsReg cr) %{
11356   match(Set dst (SubI src1 (URShiftI src2 src3)));
11357 
11358   ins_cost(1.9 * INSN_COST);
11359   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11360 
11361   ins_encode %{
11362     __ subw(as_Register($dst$$reg),
11363               as_Register($src1$$reg),
11364               as_Register($src2$$reg),
11365               Assembler::LSR,
11366               $src3$$constant & 0x1f);
11367   %}
11368 
11369   ins_pipe(ialu_reg_reg_shift);
11370 %}
11371 
11372 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11373                          iRegL src1, iRegL src2,
11374                          immI src3, rFlagsReg cr) %{
11375   match(Set dst (SubL src1 (URShiftL src2 src3)));
11376 
11377   ins_cost(1.9 * INSN_COST);
11378   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11379 
11380   ins_encode %{
11381     __ sub(as_Register($dst$$reg),
11382               as_Register($src1$$reg),
11383               as_Register($src2$$reg),
11384               Assembler::LSR,
11385               $src3$$constant & 0x3f);
11386   %}
11387 
11388   ins_pipe(ialu_reg_reg_shift);
11389 %}
11390 
11391 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11392                          iRegIorL2I src1, iRegIorL2I src2,
11393                          immI src3, rFlagsReg cr) %{
11394   match(Set dst (SubI src1 (RShiftI src2 src3)));
11395 
11396   ins_cost(1.9 * INSN_COST);
11397   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11398 
11399   ins_encode %{
11400     __ subw(as_Register($dst$$reg),
11401               as_Register($src1$$reg),
11402               as_Register($src2$$reg),
11403               Assembler::ASR,
11404               $src3$$constant & 0x1f);
11405   %}
11406 
11407   ins_pipe(ialu_reg_reg_shift);
11408 %}
11409 
11410 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11411                          iRegL src1, iRegL src2,
11412                          immI src3, rFlagsReg cr) %{
11413   match(Set dst (SubL src1 (RShiftL src2 src3)));
11414 
11415   ins_cost(1.9 * INSN_COST);
11416   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11417 
11418   ins_encode %{
11419     __ sub(as_Register($dst$$reg),
11420               as_Register($src1$$reg),
11421               as_Register($src2$$reg),
11422               Assembler::ASR,
11423               $src3$$constant & 0x3f);
11424   %}
11425 
11426   ins_pipe(ialu_reg_reg_shift);
11427 %}
11428 
11429 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11430                          iRegIorL2I src1, iRegIorL2I src2,
11431                          immI src3, rFlagsReg cr) %{
11432   match(Set dst (SubI src1 (LShiftI src2 src3)));
11433 
11434   ins_cost(1.9 * INSN_COST);
11435   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11436 
11437   ins_encode %{
11438     __ subw(as_Register($dst$$reg),
11439               as_Register($src1$$reg),
11440               as_Register($src2$$reg),
11441               Assembler::LSL,
11442               $src3$$constant & 0x1f);
11443   %}
11444 
11445   ins_pipe(ialu_reg_reg_shift);
11446 %}
11447 
11448 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11449                          iRegL src1, iRegL src2,
11450                          immI src3, rFlagsReg cr) %{
11451   match(Set dst (SubL src1 (LShiftL src2 src3)));
11452 
11453   ins_cost(1.9 * INSN_COST);
11454   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11455 
11456   ins_encode %{
11457     __ sub(as_Register($dst$$reg),
11458               as_Register($src1$$reg),
11459               as_Register($src2$$reg),
11460               Assembler::LSL,
11461               $src3$$constant & 0x3f);
11462   %}
11463 
11464   ins_pipe(ialu_reg_reg_shift);
11465 %}
11466 
11467 
11468 
11469 // Shift Left followed by Shift Right.
11470 // This idiom is used by the compiler for the i2b bytecode etc.
11471 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11472 %{
11473   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
11474   // Make sure we are not going to exceed what sbfm can do.
11475   predicate((unsigned int)n->in(2)->get_int() <= 63
11476             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11477 
11478   ins_cost(INSN_COST * 2);
11479   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11480   ins_encode %{
11481     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11482     int s = 63 - lshift;
11483     int r = (rshift - lshift) & 63;
11484     __ sbfm(as_Register($dst$$reg),
11485             as_Register($src$$reg),
11486             r, s);
11487   %}
11488 
11489   ins_pipe(ialu_reg_shift);
11490 %}
11491 
11492 // Shift Left followed by Shift Right.
11493 // This idiom is used by the compiler for the i2b bytecode etc.
11494 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11495 %{
11496   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11497   // Make sure we are not going to exceed what sbfmw can do.
11498   predicate((unsigned int)n->in(2)->get_int() <= 31
11499             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11500 
11501   ins_cost(INSN_COST * 2);
11502   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11503   ins_encode %{
11504     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11505     int s = 31 - lshift;
11506     int r = (rshift - lshift) & 31;
11507     __ sbfmw(as_Register($dst$$reg),
11508             as_Register($src$$reg),
11509             r, s);
11510   %}
11511 
11512   ins_pipe(ialu_reg_shift);
11513 %}
11514 
11515 // Shift Left followed by Shift Right.
11516 // This idiom is used by the compiler for the i2b bytecode etc.
11517 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11518 %{
11519   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11520   // Make sure we are not going to exceed what ubfm can do.
11521   predicate((unsigned int)n->in(2)->get_int() <= 63
11522             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11523 
11524   ins_cost(INSN_COST * 2);
11525   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11526   ins_encode %{
11527     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11528     int s = 63 - lshift;
11529     int r = (rshift - lshift) & 63;
11530     __ ubfm(as_Register($dst$$reg),
11531             as_Register($src$$reg),
11532             r, s);
11533   %}
11534 
11535   ins_pipe(ialu_reg_shift);
11536 %}
11537 
11538 // Shift Left followed by Shift Right.
11539 // This idiom is used by the compiler for the i2b bytecode etc.
11540 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11541 %{
11542   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11543   // Make sure we are not going to exceed what ubfmw can do.
11544   predicate((unsigned int)n->in(2)->get_int() <= 31
11545             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11546 
11547   ins_cost(INSN_COST * 2);
11548   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11549   ins_encode %{
11550     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11551     int s = 31 - lshift;
11552     int r = (rshift - lshift) & 31;
11553     __ ubfmw(as_Register($dst$$reg),
11554             as_Register($src$$reg),
11555             r, s);
11556   %}
11557 
11558   ins_pipe(ialu_reg_shift);
11559 %}
11560 // Bitfield extract with shift & mask
11561 
11562 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11563 %{
11564   match(Set dst (AndI (URShiftI src rshift) mask));
11565 
11566   ins_cost(INSN_COST);
11567   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
11568   ins_encode %{
11569     int rshift = $rshift$$constant;
11570     long mask = $mask$$constant;
11571     int width = exact_log2(mask+1);
11572     __ ubfxw(as_Register($dst$$reg),
11573             as_Register($src$$reg), rshift, width);
11574   %}
11575   ins_pipe(ialu_reg_shift);
11576 %}
11577 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
11578 %{
11579   match(Set dst (AndL (URShiftL src rshift) mask));
11580 
11581   ins_cost(INSN_COST);
11582   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11583   ins_encode %{
11584     int rshift = $rshift$$constant;
11585     long mask = $mask$$constant;
11586     int width = exact_log2(mask+1);
11587     __ ubfx(as_Register($dst$$reg),
11588             as_Register($src$$reg), rshift, width);
11589   %}
11590   ins_pipe(ialu_reg_shift);
11591 %}
11592 
11593 // We can use ubfx when extending an And with a mask when we know mask
11594 // is positive.  We know that because immI_bitmask guarantees it.
// UBFX (unsigned bit-field extract) computes (src >>> rshift) & mask in a
// single instruction; writing the W-form result zero-extends to 64 bits,
// which supplies the ConvI2L for free.
11595 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11596 %{
11597   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11598 
11599   ins_cost(INSN_COST * 2);
11600   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11601   ins_encode %{
11602     int rshift = $rshift$$constant;
11603     long mask = $mask$$constant;
// immI_bitmask guarantees mask == 2^width - 1, so the extracted field
// width is exactly log2(mask + 1).
11604     int width = exact_log2(mask+1);
11605     __ ubfx(as_Register($dst$$reg),
11606             as_Register($src$$reg), rshift, width);
11607   %}
11608   ins_pipe(ialu_reg_shift);
11609 %}
11610 
11611 // We can use ubfiz when masking by a positive number and then left shifting the result.
11612 // We know that the mask is positive because immI_bitmask guarantees it.
11613 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11614 %{
11615   match(Set dst (LShiftI (AndI src mask) lshift));
// Only legal when the shifted bit field still fits in a 32-bit register:
// lshift <= 31 and (field width + lshift) <= 32.
11616   predicate((unsigned int)n->in(2)->get_int() <= 31 &&
11617     (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));
11618 
11619   ins_cost(INSN_COST);
11620   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
11621   ins_encode %{
11622     int lshift = $lshift$$constant;
11623     long mask = $mask$$constant;
11624     int width = exact_log2(mask+1);
11625     __ ubfizw(as_Register($dst$$reg),
11626           as_Register($src$$reg), lshift, width);
11627   %}
11628   ins_pipe(ialu_reg_shift);
11629 %}
11630 // We can use ubfiz when masking by a positive number and then left shifting the result.
11631 // We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI: lshift <= 63 and (field width + lshift) <= 64.
11632 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
11633 %{
11634   match(Set dst (LShiftL (AndL src mask) lshift));
11635   predicate((unsigned int)n->in(2)->get_int() <= 63 &&
11636     (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));
11637 
11638   ins_cost(INSN_COST);
11639   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11640   ins_encode %{
11641     int lshift = $lshift$$constant;
11642     long mask = $mask$$constant;
11643     int width = exact_log2(mask+1);
11644     __ ubfiz(as_Register($dst$$reg),
11645           as_Register($src$$reg), lshift, width);
11646   %}
11647   ins_pipe(ialu_reg_shift);
11648 %}
11649 
11650 // If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
// The ConvI2L is absorbed: the unsigned field insert into the X register
// zero-extends, which is exactly what ConvI2L of a non-negative value does.
11651 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11652 %{
11653   match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
11654   predicate((unsigned int)n->in(2)->get_int() <= 31 &&
11655     (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
11656 
11657   ins_cost(INSN_COST);
11658   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11659   ins_encode %{
11660     int lshift = $lshift$$constant;
11661     long mask = $mask$$constant;
11662     int width = exact_log2(mask+1);
11663     __ ubfiz(as_Register($dst$$reg),
11664              as_Register($src$$reg), lshift, width);
11665   %}
11666   ins_pipe(ialu_reg_shift);
11667 %}
11668 
11669 // Rotations
// (x << lshift) | (x >>> rshift) with lshift + rshift == 64 (or 32) is a
// rotate; EXTR with both sources equal... here the two sources may differ,
// so these rules implement the general funnel-shift form of EXTR.  The
// predicate requires the two shift amounts to sum to the register width
// (sum & 63 == 0, resp. sum & 31 == 0), which is when Or/Add of the two
// shifted halves equals the EXTR result.
11670 
11671 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11672 %{
11673   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11674   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11675 
11676   ins_cost(INSN_COST);
11677   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11678 
11679   ins_encode %{
11680     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11681             $rshift$$constant & 63);
11682   %}
11683   ins_pipe(ialu_reg_reg_extr);
11684 %}
11685 
// 32-bit variant of extrOrL (EXTRW, shifts taken mod 32).
11686 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11687 %{
11688   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11689   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11690 
11691   ins_cost(INSN_COST);
11692   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11693 
11694   ins_encode %{
11695     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11696             $rshift$$constant & 31);
11697   %}
11698   ins_pipe(ialu_reg_reg_extr);
11699 %}
11700 
// Same as extrOrL but the halves are combined with AddL; since the shifted
// halves occupy disjoint bit ranges, Add and Or produce the same value.
11701 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11702 %{
11703   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11704   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11705 
11706   ins_cost(INSN_COST);
11707   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11708 
11709   ins_encode %{
11710     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11711             $rshift$$constant & 63);
11712   %}
11713   ins_pipe(ialu_reg_reg_extr);
11714 %}
11715 
// 32-bit variant of extrAddL.
11716 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11717 %{
11718   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11719   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11720 
11721   ins_cost(INSN_COST);
11722   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11723 
11724   ins_encode %{
11725     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11726             $rshift$$constant & 31);
11727   %}
11728   ins_pipe(ialu_reg_reg_extr);
11729 %}
11730 
11731 
11732 // rol expander
// AArch64 has no variable rotate-left; rol(x, s) is implemented as
// ror(x, -s): the shift count is negated (subw from zr) into rscratch1
// and RORV takes the count modulo the register width.
11733 
11734 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11735 %{
11736   effect(DEF dst, USE src, USE shift);
11737 
11738   format %{ "rol    $dst, $src, $shift" %}
11739   ins_cost(INSN_COST * 3);
11740   ins_encode %{
11741     __ subw(rscratch1, zr, as_Register($shift$$reg));
11742     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11743             rscratch1);
11744     %}
11745   ins_pipe(ialu_reg_reg_vshift);
11746 %}
11747 
11748 // rol expander
// 32-bit variant: RORVW uses the count modulo 32.
11749 
11750 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11751 %{
11752   effect(DEF dst, USE src, USE shift);
11753 
11754   format %{ "rol    $dst, $src, $shift" %}
11755   ins_cost(INSN_COST * 3);
11756   ins_encode %{
11757     __ subw(rscratch1, zr, as_Register($shift$$reg));
11758     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11759             rscratch1);
11760     %}
11761   ins_pipe(ialu_reg_reg_vshift);
11762 %}
11763 
// Rotate-left idiom: (x << s) | (x >>> (64 - s)).  Because the hardware
// takes shift counts mod 64, the "64 - s" and "0 - s" spellings of the
// complement are both recognized and expand to the rol expander above.
11764 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11765 %{
11766   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
11767 
11768   expand %{
11769     rolL_rReg(dst, src, shift, cr);
11770   %}
11771 %}
11772 
11773 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11774 %{
11775   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
11776 
11777   expand %{
11778     rolL_rReg(dst, src, shift, cr);
11779   %}
11780 %}
11781 
// 32-bit rotate-left idioms: (x << s) | (x >>> (32 - s)) and the mod-32
// equivalent (x << s) | (x >>> (0 - s)).
11782 instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11783 %{
11784   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
11785 
11786   expand %{
11787     rolI_rReg(dst, src, shift, cr);
11788   %}
11789 %}
11790 
11791 instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11792 %{
11793   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
11794 
11795   expand %{
11796     rolI_rReg(dst, src, shift, cr);
11797   %}
11798 %}
11799 
11800 // ror expander
// Rotate-right maps directly onto RORV: single instruction, no negation,
// hence the lower cost than the rol expander.
11801 
11802 instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11803 %{
11804   effect(DEF dst, USE src, USE shift);
11805 
11806   format %{ "ror    $dst, $src, $shift" %}
11807   ins_cost(INSN_COST);
11808   ins_encode %{
11809     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11810             as_Register($shift$$reg));
11811     %}
11812   ins_pipe(ialu_reg_reg_vshift);
11813 %}
11814 
11815 // ror expander
11816 
11817 instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11818 %{
11819   effect(DEF dst, USE src, USE shift);
11820 
11821   format %{ "ror    $dst, $src, $shift" %}
11822   ins_cost(INSN_COST);
11823   ins_encode %{
11824     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11825             as_Register($shift$$reg));
11826     %}
11827   ins_pipe(ialu_reg_reg_vshift);
11828 %}
11829 
// Rotate-right idioms, mirroring the rol ones above:
// (x >>> s) | (x << (64 - s)) and (x >>> s) | (x << (0 - s)).
11830 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11831 %{
11832   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
11833 
11834   expand %{
11835     rorL_rReg(dst, src, shift, cr);
11836   %}
11837 %}
11838 
11839 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11840 %{
11841   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
11842 
11843   expand %{
11844     rorL_rReg(dst, src, shift, cr);
11845   %}
11846 %}
11847 
11848 instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11849 %{
11850   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
11851 
11852   expand %{
11853     rorI_rReg(dst, src, shift, cr);
11854   %}
11855 %}
11856 
11857 instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11858 %{
11859   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
11860 
11861   expand %{
11862     rorI_rReg(dst, src, shift, cr);
11863   %}
11864 %}
11865 
11866 // Add/subtract (extended)
// These rules fold an explicit widening/narrowing of the second operand
// into the add/sub itself, using the AArch64 extended-register forms
// (sxtb/sxth/sxtw, uxtb).  The (x << k) >> k shift pairs matched below
// are the canonical sign-extension idiom: k = 56/48/32 for byte/short/int
// within a long, k = 24/16 for byte/short within an int.  URShift instead
// of RShift gives the zero-extending (uxt*) variants.
11867 
11868 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11869 %{
11870   match(Set dst (AddL src1 (ConvI2L src2)));
11871   ins_cost(INSN_COST);
11872   format %{ "add  $dst, $src1, $src2, sxtw" %}
11873 
11874    ins_encode %{
11875      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11876             as_Register($src2$$reg), ext::sxtw);
11877    %}
11878   ins_pipe(ialu_reg_reg);
11879 %};
11880 
11881 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11882 %{
11883   match(Set dst (SubL src1 (ConvI2L src2)));
11884   ins_cost(INSN_COST);
11885   format %{ "sub  $dst, $src1, $src2, sxtw" %}
11886 
11887    ins_encode %{
11888      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
11889             as_Register($src2$$reg), ext::sxtw);
11890    %}
11891   ins_pipe(ialu_reg_reg);
11892 %};
11893 
11894 
// src1 + (short)src2: (src2 << 16) >> 16 sign-extends the low 16 bits.
11895 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
11896 %{
11897   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11898   ins_cost(INSN_COST);
11899   format %{ "add  $dst, $src1, $src2, sxth" %}
11900 
11901    ins_encode %{
11902      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11903             as_Register($src2$$reg), ext::sxth);
11904    %}
11905   ins_pipe(ialu_reg_reg);
11906 %}
11907 
// src1 + (byte)src2: shift pair of 24 sign-extends the low 8 bits.
11908 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11909 %{
11910   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11911   ins_cost(INSN_COST);
11912   format %{ "add  $dst, $src1, $src2, sxtb" %}
11913 
11914    ins_encode %{
11915      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11916             as_Register($src2$$reg), ext::sxtb);
11917    %}
11918   ins_pipe(ialu_reg_reg);
11919 %}
11920 
// Unsigned variant: URShift makes the shift pair a zero-extension (uxtb).
11921 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11922 %{
11923   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
11924   ins_cost(INSN_COST);
11925   format %{ "add  $dst, $src1, $src2, uxtb" %}
11926 
11927    ins_encode %{
11928      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11929             as_Register($src2$$reg), ext::uxtb);
11930    %}
11931   ins_pipe(ialu_reg_reg);
11932 %}
11933 
// Long forms: shift pair of 48/32/56 extends short/int/byte within a long.
11934 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
11935 %{
11936   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11937   ins_cost(INSN_COST);
11938   format %{ "add  $dst, $src1, $src2, sxth" %}
11939 
11940    ins_encode %{
11941      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11942             as_Register($src2$$reg), ext::sxth);
11943    %}
11944   ins_pipe(ialu_reg_reg);
11945 %}
11946 
11947 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
11948 %{
11949   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11950   ins_cost(INSN_COST);
11951   format %{ "add  $dst, $src1, $src2, sxtw" %}
11952 
11953    ins_encode %{
11954      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11955             as_Register($src2$$reg), ext::sxtw);
11956    %}
11957   ins_pipe(ialu_reg_reg);
11958 %}
11959 
11960 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11961 %{
11962   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11963   ins_cost(INSN_COST);
11964   format %{ "add  $dst, $src1, $src2, sxtb" %}
11965 
11966    ins_encode %{
11967      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11968             as_Register($src2$$reg), ext::sxtb);
11969    %}
11970   ins_pipe(ialu_reg_reg);
11971 %}
11972 
11973 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11974 %{
11975   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
11976   ins_cost(INSN_COST);
11977   format %{ "add  $dst, $src1, $src2, uxtb" %}
11978 
11979    ins_encode %{
11980      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11981             as_Register($src2$$reg), ext::uxtb);
11982    %}
11983   ins_pipe(ialu_reg_reg);
11984 %}
11985 
11986 
// Zero-extension written as an AND mask (0xFF / 0xFFFF / 0xFFFFFFFF) is
// folded into the add/sub using the uxtb/uxth/uxtw extended-register forms.
11987 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
11988 %{
11989   match(Set dst (AddI src1 (AndI src2 mask)));
11990   ins_cost(INSN_COST);
11991   format %{ "addw  $dst, $src1, $src2, uxtb" %}
11992 
11993    ins_encode %{
11994      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11995             as_Register($src2$$reg), ext::uxtb);
11996    %}
11997   ins_pipe(ialu_reg_reg);
11998 %}
11999 
12000 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12001 %{
12002   match(Set dst (AddI src1 (AndI src2 mask)));
12003   ins_cost(INSN_COST);
12004   format %{ "addw  $dst, $src1, $src2, uxth" %}
12005 
12006    ins_encode %{
12007      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12008             as_Register($src2$$reg), ext::uxth);
12009    %}
12010   ins_pipe(ialu_reg_reg);
12011 %}
12012 
12013 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12014 %{
12015   match(Set dst (AddL src1 (AndL src2 mask)));
12016   ins_cost(INSN_COST);
12017   format %{ "add  $dst, $src1, $src2, uxtb" %}
12018 
12019    ins_encode %{
12020      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12021             as_Register($src2$$reg), ext::uxtb);
12022    %}
12023   ins_pipe(ialu_reg_reg);
12024 %}
12025 
12026 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12027 %{
12028   match(Set dst (AddL src1 (AndL src2 mask)));
12029   ins_cost(INSN_COST);
12030   format %{ "add  $dst, $src1, $src2, uxth" %}
12031 
12032    ins_encode %{
12033      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12034             as_Register($src2$$reg), ext::uxth);
12035    %}
12036   ins_pipe(ialu_reg_reg);
12037 %}
12038 
12039 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12040 %{
12041   match(Set dst (AddL src1 (AndL src2 mask)));
12042   ins_cost(INSN_COST);
12043   format %{ "add  $dst, $src1, $src2, uxtw" %}
12044 
12045    ins_encode %{
12046      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12047             as_Register($src2$$reg), ext::uxtw);
12048    %}
12049   ins_pipe(ialu_reg_reg);
12050 %}
12051 
// Subtract counterparts of the masked-extend adds above.
12052 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
12053 %{
12054   match(Set dst (SubI src1 (AndI src2 mask)));
12055   ins_cost(INSN_COST);
12056   format %{ "subw  $dst, $src1, $src2, uxtb" %}
12057 
12058    ins_encode %{
12059      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12060             as_Register($src2$$reg), ext::uxtb);
12061    %}
12062   ins_pipe(ialu_reg_reg);
12063 %}
12064 
12065 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12066 %{
12067   match(Set dst (SubI src1 (AndI src2 mask)));
12068   ins_cost(INSN_COST);
12069   format %{ "subw  $dst, $src1, $src2, uxth" %}
12070 
12071    ins_encode %{
12072      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12073             as_Register($src2$$reg), ext::uxth);
12074    %}
12075   ins_pipe(ialu_reg_reg);
12076 %}
12077 
12078 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12079 %{
12080   match(Set dst (SubL src1 (AndL src2 mask)));
12081   ins_cost(INSN_COST);
12082   format %{ "sub  $dst, $src1, $src2, uxtb" %}
12083 
12084    ins_encode %{
12085      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12086             as_Register($src2$$reg), ext::uxtb);
12087    %}
12088   ins_pipe(ialu_reg_reg);
12089 %}
12090 
12091 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12092 %{
12093   match(Set dst (SubL src1 (AndL src2 mask)));
12094   ins_cost(INSN_COST);
12095   format %{ "sub  $dst, $src1, $src2, uxth" %}
12096 
12097    ins_encode %{
12098      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12099             as_Register($src2$$reg), ext::uxth);
12100    %}
12101   ins_pipe(ialu_reg_reg);
12102 %}
12103 
12104 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12105 %{
12106   match(Set dst (SubL src1 (AndL src2 mask)));
12107   ins_cost(INSN_COST);
12108   format %{ "sub  $dst, $src1, $src2, uxtw" %}
12109 
12110    ins_encode %{
12111      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12112             as_Register($src2$$reg), ext::uxtw);
12113    %}
12114   ins_pipe(ialu_reg_reg);
12115 %}
12116 
12117 
// Extended-register add/sub with an additional left shift: matches
// src1 +/- (extend(src2) << lshift2), where extend is expressed by a
// (x << k) >> k shift pair and lshift2 is an immIExt shift amount
// accepted by the hardware's extended-operand form (presumably 0..4 per
// the AArch64 add/sub (extended register) encoding — confirm against
// the immIExt operand definition earlier in this file).
12118 instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12119 %{
12120   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12121   ins_cost(1.9 * INSN_COST);
12122   format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}
12123 
12124    ins_encode %{
12125      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12126             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12127    %}
12128   ins_pipe(ialu_reg_reg_shift);
12129 %}
12130 
12131 instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12132 %{
12133   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12134   ins_cost(1.9 * INSN_COST);
12135   format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}
12136 
12137    ins_encode %{
12138      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12139             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12140    %}
12141   ins_pipe(ialu_reg_reg_shift);
12142 %}
12143 
12144 instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12145 %{
12146   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12147   ins_cost(1.9 * INSN_COST);
12148   format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}
12149 
12150    ins_encode %{
12151      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12152             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12153    %}
12154   ins_pipe(ialu_reg_reg_shift);
12155 %}
12156 
12157 instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12158 %{
12159   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12160   ins_cost(1.9 * INSN_COST);
12161   format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}
12162 
12163    ins_encode %{
12164      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12165             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12166    %}
12167   ins_pipe(ialu_reg_reg_shift);
12168 %}
12169 
12170 instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12171 %{
12172   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12173   ins_cost(1.9 * INSN_COST);
12174   format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}
12175 
12176    ins_encode %{
12177      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12178             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12179    %}
12180   ins_pipe(ialu_reg_reg_shift);
12181 %}
12182 
12183 instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12184 %{
12185   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12186   ins_cost(1.9 * INSN_COST);
12187   format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}
12188 
12189    ins_encode %{
12190      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12191             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12192    %}
12193   ins_pipe(ialu_reg_reg_shift);
12194 %}
12195 
// 32-bit (addw/subw) counterparts with byte/short extension.
12196 instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12197 %{
12198   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12199   ins_cost(1.9 * INSN_COST);
12200   format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}
12201 
12202    ins_encode %{
12203      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12204             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12205    %}
12206   ins_pipe(ialu_reg_reg_shift);
12207 %}
12208 
12209 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12210 %{
12211   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12212   ins_cost(1.9 * INSN_COST);
12213   format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
12214 
12215    ins_encode %{
12216      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12217             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12218    %}
12219   ins_pipe(ialu_reg_reg_shift);
12220 %}
12221 
12222 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12223 %{
12224   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12225   ins_cost(1.9 * INSN_COST);
12226   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
12227 
12228    ins_encode %{
12229      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12230             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12231    %}
12232   ins_pipe(ialu_reg_reg_shift);
12233 %}
12234 
12235 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12236 %{
12237   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12238   ins_cost(1.9 * INSN_COST);
12239   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
12240 
12241    ins_encode %{
12242      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12243             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12244    %}
12245   ins_pipe(ialu_reg_reg_shift);
12246 %}
12247 
12248 
// src1 +/- ((long)src2 << lshift): the ConvI2L and the shift both fold
// into a single extended-register add/sub with sxtw and a shift amount.
12249 instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12250 %{
12251   match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
12252   ins_cost(1.9 * INSN_COST);
12253   format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}
12254 
12255    ins_encode %{
12256      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12257             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12258    %}
12259   ins_pipe(ialu_reg_reg_shift);
12260 %};
12261 
12262 instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12263 %{
12264   match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
12265   ins_cost(1.9 * INSN_COST);
12266   format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}
12267 
12268    ins_encode %{
12269      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12270             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12271    %}
12272   ins_pipe(ialu_reg_reg_shift);
12273 %};
12274 
12275 
// src1 +/- ((src2 & mask) << lshift): zero-extension written as an AND
// mask plus a left shift folds into one extended-register add/sub
// (uxtb/uxth/uxtw with a shift amount).
12276 instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
12277 %{
12278   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12279   ins_cost(1.9 * INSN_COST);
12280   format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}
12281 
12282    ins_encode %{
12283      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12284             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12285    %}
12286   ins_pipe(ialu_reg_reg_shift);
12287 %}
12288 
12289 instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
12290 %{
12291   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12292   ins_cost(1.9 * INSN_COST);
12293   format %{ "add  $dst, $src1, $src2, uxth #lshift" %}
12294 
12295    ins_encode %{
12296      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12297             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12298    %}
12299   ins_pipe(ialu_reg_reg_shift);
12300 %}
12301 
12302 instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
12303 %{
12304   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12305   ins_cost(1.9 * INSN_COST);
12306   format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}
12307 
12308    ins_encode %{
12309      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12310             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
12311    %}
12312   ins_pipe(ialu_reg_reg_shift);
12313 %}
12314 
12315 instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
12316 %{
12317   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12318   ins_cost(1.9 * INSN_COST);
12319   format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}
12320 
12321    ins_encode %{
12322      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12323             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12324    %}
12325   ins_pipe(ialu_reg_reg_shift);
12326 %}
12327 
12328 instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
12329 %{
12330   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12331   ins_cost(1.9 * INSN_COST);
12332   format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}
12333 
12334    ins_encode %{
12335      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12336             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12337    %}
12338   ins_pipe(ialu_reg_reg_shift);
12339 %}
12340 
12341 instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
12342 %{
12343   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12344   ins_cost(1.9 * INSN_COST);
12345   format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}
12346 
12347    ins_encode %{
12348      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12349             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
12350    %}
12351   ins_pipe(ialu_reg_reg_shift);
12352 %}
12353 
// 32-bit (addw/subw) counterparts for byte/short masks.
12354 instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12355 %{
12356   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12357   ins_cost(1.9 * INSN_COST);
12358   format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}
12359 
12360    ins_encode %{
12361      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12362             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12363    %}
12364   ins_pipe(ialu_reg_reg_shift);
12365 %}
12366 
12367 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12368 %{
12369   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12370   ins_cost(1.9 * INSN_COST);
12371   format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
12372 
12373    ins_encode %{
12374      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12375             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12376    %}
12377   ins_pipe(ialu_reg_reg_shift);
12378 %}
12379 
12380 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12381 %{
12382   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12383   ins_cost(1.9 * INSN_COST);
12384   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
12385 
12386    ins_encode %{
12387      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12388             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12389    %}
12390   ins_pipe(ialu_reg_reg_shift);
12391 %}
12392 
12393 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12394 %{
12395   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12396   ins_cost(1.9 * INSN_COST);
12397   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
12398 
12399    ins_encode %{
12400      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12401             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12402    %}
12403   ins_pipe(ialu_reg_reg_shift);
12404 %}
12405 // END This section of the file is automatically generated. Do not edit --------------
12406 
12407 // ============================================================================
12408 // Floating Point Arithmetic Instructions
12409 
// Scalar FP add/sub/mul, single and double precision, register-register.
// dst/src live in the FP/SIMD register file (vRegF/vRegD).
12410 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12411   match(Set dst (AddF src1 src2));
12412 
12413   ins_cost(INSN_COST * 5);
12414   format %{ "fadds   $dst, $src1, $src2" %}
12415 
12416   ins_encode %{
12417     __ fadds(as_FloatRegister($dst$$reg),
12418              as_FloatRegister($src1$$reg),
12419              as_FloatRegister($src2$$reg));
12420   %}
12421 
12422   ins_pipe(fp_dop_reg_reg_s);
12423 %}
12424 
12425 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12426   match(Set dst (AddD src1 src2));
12427 
12428   ins_cost(INSN_COST * 5);
12429   format %{ "faddd   $dst, $src1, $src2" %}
12430 
12431   ins_encode %{
12432     __ faddd(as_FloatRegister($dst$$reg),
12433              as_FloatRegister($src1$$reg),
12434              as_FloatRegister($src2$$reg));
12435   %}
12436 
12437   ins_pipe(fp_dop_reg_reg_d);
12438 %}
12439 
12440 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12441   match(Set dst (SubF src1 src2));
12442 
12443   ins_cost(INSN_COST * 5);
12444   format %{ "fsubs   $dst, $src1, $src2" %}
12445 
12446   ins_encode %{
12447     __ fsubs(as_FloatRegister($dst$$reg),
12448              as_FloatRegister($src1$$reg),
12449              as_FloatRegister($src2$$reg));
12450   %}
12451 
12452   ins_pipe(fp_dop_reg_reg_s);
12453 %}
12454 
12455 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12456   match(Set dst (SubD src1 src2));
12457 
12458   ins_cost(INSN_COST * 5);
12459   format %{ "fsubd   $dst, $src1, $src2" %}
12460 
12461   ins_encode %{
12462     __ fsubd(as_FloatRegister($dst$$reg),
12463              as_FloatRegister($src1$$reg),
12464              as_FloatRegister($src2$$reg));
12465   %}
12466 
12467   ins_pipe(fp_dop_reg_reg_d);
12468 %}
12469 
12470 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12471   match(Set dst (MulF src1 src2));
12472 
12473   ins_cost(INSN_COST * 6);
12474   format %{ "fmuls   $dst, $src1, $src2" %}
12475 
12476   ins_encode %{
12477     __ fmuls(as_FloatRegister($dst$$reg),
12478              as_FloatRegister($src1$$reg),
12479              as_FloatRegister($src2$$reg));
12480   %}
12481 
12482   ins_pipe(fp_dop_reg_reg_s);
12483 %}
12484 
12485 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12486   match(Set dst (MulD src1 src2));
12487 
12488   ins_cost(INSN_COST * 6);
12489   format %{ "fmuld   $dst, $src1, $src2" %}
12490 
12491   ins_encode %{
12492     __ fmuld(as_FloatRegister($dst$$reg),
12493              as_FloatRegister($src1$$reg),
12494              as_FloatRegister($src2$$reg));
12495   %}
12496 
12497   ins_pipe(fp_dop_reg_reg_d);
12498 %}
12499 
12500 // src1 * src2 + src3
// Fused multiply-add, single: dst = src1 * src2 + src3 (fmadds).
// Only selected when UseFMA is on; the ideal FmaF node carries the
// addend (src3) as the first input.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-add, double: dst = src1 * src2 + src3 (fmaddd).
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-subtract, single: dst = -src1 * src2 + src3 (fmsubs).
// Two match rules absorb the NegF on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Fused multiply-subtract, double: dst = -src1 * src2 + src3 (fmsubd).
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Negated fused multiply-add, single: dst = -src1 * src2 - src3
// (fnmadds). Matches FmaF with both the addend and one multiplicand
// negated.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Negated fused multiply-add, double: dst = -src1 * src2 - src3 (fnmaddd).
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12605 
12606 // src1 * src2 - src3
// Negated fused multiply-subtract, single: dst = src1 * src2 - src3
// (fnmsubs). Matches FmaF with a negated addend only.
// NOTE(review): the immF0 zero operand is not referenced by the match
// rule or the encoding — presumably left over from an earlier match
// form; confirm before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Negated fused multiply-subtract, double: dst = src1 * src2 - src3.
// NOTE(review): immD0 zero is unused here too — see mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd, but the assembler declares the
  // double-precision variant under the name fnmsub
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12640 
12641 
12642 // Math.max(FF)F
// Math.max(FF)F — single-precision maximum via fmaxs.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F — single-precision minimum via fmins.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D — double-precision maximum via fmaxd.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D — double-precision minimum via fmind.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12697 
12698 
// Single-precision divide: dst = src1 / src2 (fdivs).
// High cost reflects the long, typically non-pipelined divide latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2 (fdivd).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12728 
// Single-precision negate: dst = -src (fnegs, flips the sign bit).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format mnemonic fixed: the encoding emits fnegs; it previously
  // printed "fneg", inconsistent with negD_reg_reg's "fnegd".
  format %{ "fnegs  $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12742 
// Double-precision negate: dst = -src (fnegd, flips the sign bit).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision absolute value: dst = |src| (fabss).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value: dst = |src| (fabsd).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12782 
// Double-precision square root: dst = sqrt(src) (fsqrtd).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed: a double-precision op belongs on fp_div_d; it was
  // fp_div_s, apparently swapped with sqrtF_reg (scheduling hint only).
  ins_pipe(fp_div_d);
%}
12795 
// Single-precision square root, matched through the canonical ideal
// pattern (float)Math.sqrt((double)src): dst = fsqrts(src).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed: a single-precision op belongs on fp_div_s; it was
  // fp_div_d, apparently swapped with sqrtD_reg (scheduling hint only).
  ins_pipe(fp_div_s);
%}
12808 
12809 // ============================================================================
12810 // Logical Instructions
12811 
12812 // Integer Logical Instructions
12813 
12814 // And Instructions
12815 
12816 
// Bitwise AND of two 32-bit registers (andw).
// NOTE(review): the rFlagsReg cr operand has no effect(KILL cr) and the
// encoding does not touch flags — presumably vestigial; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12831 
// Bitwise AND of a 32-bit register with an encodable logical immediate.
// NOTE(review): rFlagsReg cr is declared but unused, as in andI_reg_reg.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format mnemonic fixed: the encoding emits andw (flags untouched);
  // it previously printed "andsw", which would imply flag setting.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12846 
12847 // Or Instructions
12848 
// Bitwise OR of two 32-bit registers (orrw).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of a 32-bit register with an encodable logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Bitwise XOR of two 32-bit registers (eorw).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of a 32-bit register with an encodable logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12910 
12911 // Long Logical Instructions
12912 // TODO
12913 
// Bitwise AND of two 64-bit registers (andr).
// NOTE(review): rFlagsReg cr is declared but unused, as in andI_reg_reg.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format annotation corrected: this is the long (64-bit) form.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12928 
// Bitwise AND of a 64-bit register with an encodable logical immediate.
// NOTE(review): rFlagsReg cr is declared but unused, as in andI_reg_reg.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format annotation corrected: this is the long (64-bit) form.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12943 
12944 // Or Instructions
12945 
// Bitwise OR of two 64-bit registers (orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format annotation corrected: this is the long (64-bit) form.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12960 
// Bitwise OR of a 64-bit register with an encodable logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format annotation corrected: this is the long (64-bit) form.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12975 
12976 // Xor Instructions
12977 
// Bitwise XOR of two 64-bit registers (eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format annotation corrected: this is the long (64-bit) form.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12992 
// Bitwise XOR of a 64-bit register with an encodable logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  // Format annotation corrected ("# long"); format/ins_cost ordering
  // normalized to match the sibling logical instructs.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13007 
// Sign-extend int to long: sbfm with bit range 0..31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Zero-extend int to long: (long)src & 0xFFFFFFFF collapses to a single
// ubfm (uxtw). This pattern occurs in bigmath arithmetic.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move discards the upper half.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13046 
// Int to boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero and
// conditional set. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer to boolean: dst = (src != NULL) ? 1 : 0, 64-bit compare.
// Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13082 
// Narrow double to float (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Widen float to double (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float to int, round toward zero (fcvtzsw).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to long, round toward zero (fcvtzs).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Signed int to float (scvtfws).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Signed long to float (scvtfs).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double to int, round toward zero (fcvtzdw).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double to long, round toward zero (fcvtzd).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Signed int to double (scvtfwd).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Signed long to double (scvtfd).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13212 
13213 // stack <-> reg and reg <-> reg shuffles with no conversion
13214 
// Raw-bits move of a float stack slot into an int register (ldrw from
// the slot's sp-relative displacement); no value conversion.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw-bits move of an int stack slot into a float register (ldrs).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits move of a double stack slot into a long register (ldr).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw-bits move of a long stack slot into a double register (ldrd).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits store of a float register into an int stack slot (strs).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw-bits store of an int register into a float stack slot (strw).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13322 
// Raw-bits store of a double register into a long stack slot (strd).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format operand order fixed to "$src, $dst": the encoding stores src
  // into the dst slot, and the sibling Move*_reg_stack formats all print
  // source first.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13340 
// Raw-bits store of a long register into a double stack slot (str).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Register-to-register raw-bits move, float -> int (fmovs FP->GP).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Register-to-register raw-bits move, int -> float (fmovs GP->FP).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Register-to-register raw-bits move, double -> long (fmovd FP->GP).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Register-to-register raw-bits move, long -> double (fmovd GP->FP).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13430 
13431 // ============================================================================
13432 // clearing of an array
13433 
// Zero an array: count in r11 (words), base in r10; both are clobbered
// by the zero_words stub call.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Zero an array with a compile-time-constant word count, only when the
// count is below the block-zeroing threshold (inline stores beat DC ZVA
// there). Base register is clobbered.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13465 
13466 // ============================================================================
13467 // Overflow Math Instructions
13468 
// Overflow check for int add: cmnw (adds to zr) sets V on signed
// overflow of op1 + op2; only the flags result is produced.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int add with an add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long add with an add/sub-encodable immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int subtract: cmpw sets V on signed overflow of
// op1 - op2.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int subtract with an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract with an immediate. Emitted as
// subs zr, op1, imm — the literal form of the cmp alias in the format.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int negation (0 - op1).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negation (0 - op1).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13598 
// Overflow check for int multiply. The 64-bit smull product is compared
// against its own 32-bit sign extension (NE => overflow); the NE result
// is then converted into the V flag that OverflowMulI consumers test,
// by arranging 0x80000000 - 1 (sets V) or 0 - 1 (clears V).
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: int multiply-overflow check feeding a branch directly.
// Restricted to overflow/no_overflow tests so the NE/EQ result of the
// sign-extension compare can stand in for VS/VC without the flag
// conversion sequence above.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13641 
13642 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13643 %{
13644   match(Set cr (OverflowMulL op1 op2));
13645 
13646   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13647             "smulh rscratch2, $op1, $op2\n\t"
13648             "cmp   rscratch2, rscratch1, ASR #63\n\t"
13649             "movw  rscratch1, #0x80000000\n\t"
13650             "cselw rscratch1, rscratch1, zr, NE\n\t"
13651             "cmpw  rscratch1, #1" %}
13652   ins_cost(6 * INSN_COST);
13653   ins_encode %{
13654     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13655     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13656     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
13657     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13658     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13659     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13660   %}
13661 
13662   ins_pipe(pipe_slow);
13663 %}
13664 
13665 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13666 %{
13667   match(If cmp (OverflowMulL op1 op2));
13668   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13669             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13670   effect(USE labl, KILL cr);
13671 
13672   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13673             "smulh rscratch2, $op1, $op2\n\t"
13674             "cmp   rscratch2, rscratch1, ASR #63\n\t"
13675             "b$cmp $labl" %}
13676   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
13677   ins_encode %{
13678     Label* L = $labl$$label;
13679     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13680     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13681     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13682     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
13683     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13684   %}
13685 
13686   ins_pipe(pipe_serial);
13687 %}
13688 
13689 // ============================================================================
13690 // Compare Instructions
13691 
// Signed int compares: all variants only set flags (cr) for a later
// conditional consumer (cmpOp).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against constant zero (add/sub-immediate encoding).
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an immediate that fits an add/sub instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// General immediate: the constant may need materializing first, hence
// the doubled cost relative to the AddSub-immediate form above.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13747 
13748 // Unsigned compare Instructions; really, same as signed compare
13749 // except it should only be used to feed an If or a CMovI which takes a
13750 // cmpOpU.
13751 
// Unsigned int compares: identical encodings to the signed forms, but
// they produce rFlagsRegU so only unsigned consumers (cmpOpU) match.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare against a general immediate (may need a move first).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13807 
// Signed long compares; same structure as the int family, 64-bit cmp.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Long compare against constant zero.
// NOTE(review): the format prints "tst" but the encoding is the
// add/sub-immediate compare encoder, not a TST — presumably a leftover
// mnemonic in the debug string; confirm before relying on it.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against a general immediate (may need a move first).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13863 
// Unsigned long compares: same encodings as the signed long family but
// produce rFlagsRegU so only unsigned consumers (cmpOpU) match.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against constant zero.
// NOTE(review): as with compL_reg_immL0, the "tst" in the format does
// not match the compare encoder actually used — debug string only.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against a general immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13919 
// Pointer compare (unsigned flags, since addresses are unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null-check: compare against the constant null (immP0).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Narrow-oop null-check: compare against the compressed null (immN0).
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13975 
13976 // FP comparisons
13977 //
13978 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13979 // using normal cmpOp. See declaration of rFlagsReg for details.
13980 
// Float register-register compare; sets the normal flags register
// (see the CmpF/CmpD note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13994 
// Float compare against the constant 0.0 (fcmps immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14008 // FROM HERE
14009 
// Double register-register compare; sets the normal flags register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14023 
// Double compare against the constant 0.0 (fcmpd immediate-zero form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14037 
// Three-way float compare: dst := 0 if equal, -1 if less or unordered,
// +1 if greater. (csinvw yields 0 on EQ else -1; csnegw keeps -1 on LT
// else negates it to +1.)
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14065 
// Three-way double compare: dst := 0 if equal, -1 if less or unordered,
// +1 if greater (same conditional-select trick as compF3_reg_reg).
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14092 
// Three-way float compare against constant 0.0: dst := 0 if equal,
// -1 if less or unordered, +1 if greater.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14119 
// Three-way double compare against constant 0.0: dst := 0 if equal,
// -1 if less or unordered, +1 if greater.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14145 
// CmpLTMask: dst := (p < q) ? -1 : 0. cset gives 1/0 for LT, then
// subtracting from zr turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates
// the sign bit, producing -1 for negative src and 0 otherwise — one
// instruction and no flags needed (cr still killed per the node spec).
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14182 
14183 // ============================================================================
14184 // Max and Min
14185 
// Signed int minimum: compare then conditional-select src1 when LT.
// size(8): exactly two 4-byte instructions.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Signed int maximum: same shape as minI_rReg but selects on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14236 
14237 // ============================================================================
14238 // Branch Instructions
14239 
14240 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
// Branches on a signed condition read from the normal flags register.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// As branchCon but matches the unsigned flags register / cmpOpU.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14296 
14297 // Make use of CBZ and CBNZ.  These instructions, as well as being
14298 // shorter than (cmp; branch), have the additional benefit of not
14299 // killing the flags.
14300 
// Compare-int-with-zero and branch: folds CmpI/If into cbzw/cbnzw.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-long-with-zero and branch: cbz/cbnz (64-bit forms).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-test and branch.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop null-test and branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-test of a DecodeN'd oop against the null pointer: the decoded
// pointer is null iff the narrow oop is zero, so test the narrow
// register directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare with zero and branch. For an unsigned value x,
// x <= 0 (LS) holds iff x == 0, so EQ and LS both map to cbzw; the
// remaining conditions mean "nonzero" and map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare with zero and branch (64-bit cbz/cbnz).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14419 
14420 // Test bit and Branch
14421 
14422 // Patterns for short (< 32KiB) variants
// Sign-test and branch, long: x < 0 / x >= 0 becomes a test of the
// sign bit (bit 63). LT maps to NE (bit set => negative), GE to EQ;
// tbr then emits the test-bit branch (presumably tbnz/tbz — confirm
// against the tbr definition).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign-test and branch, int: same idea with sign bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, long: (op1 & (1<<k)) ==/!= 0 — the
// predicate requires the mask to be a power of two so exact_log2
// yields the bit index.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, int (power-of-two mask).
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14488 
14489 // And far variants
// Far variants of the sign/bit test-and-branch rules above: same
// patterns, but tbr is passed far=true for out-of-range targets, and
// ins_short_branch is not set.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far sign-test and branch, int (sign bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test and branch, long (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test and branch, int (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14551 
14552 // Test bits
14553 
// Flag-setting AND with immediate, long: CmpL (AndL op1 op2) 0 folds
// to a single TST when the mask is a valid 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14566 
// Flag-setting AND with immediate, int: CmpI (AndI op1 op2) 0 folds to
// a single TSTW when the mask is a valid 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Debug format now says "tstw" to match the emitted 32-bit tstw
  // (was "tst"; sibling cmpI_and_reg already says "tstw").
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14579 
// Flag-setting AND of two long registers: TST, result discarded.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Flag-setting AND of two int registers: TSTW, result discarded.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14601 
14602 
14603 // Conditional Far Branch
14604 // Conditional Far Branch Unsigned
14605 // TODO: fixme
14606 
14607 // counted loop end branch near
// Counted-loop back-branch (signed): same encoding as branchCon but
// matches CountedLoopEnd.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted-loop back-branch, unsigned condition (cmpOpU / rFlagsRegU).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14640 
14641 // counted loop end branch far
14642 // counted loop end branch far unsigned
14643 // TODO: fixme
14644 
14645 // ============================================================================
14646 // inlined locking and unlocking
14647 
// Inlined fast-path monitor enter; sets flags for the slow-path branch.
// tmp/tmp2 are scratch registers clobbered by the lock sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined fast-path monitor exit; mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14675 
14676 
14677 // ============================================================================
14678 // Safepoint Instructions
14679 
14680 // TODO
14681 // provide a near and far version of this code
14682 
// Safepoint poll: loads from the polling page whose address is in $poll.
// The load faults when the page is armed, diverting the thread to the
// safepoint handler; the loaded value itself is discarded (zr).
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14695 
14696 
14697 // ============================================================================
14698 // Procedure Call/Return Instructions
14699 
// Call Java Static Instruction
// Direct (statically bound) Java call; the epilog encoding handles
// post-call bookkeeping.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction
// Dynamically dispatched Java call (inline-cache based).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction
// Call out of Java into the VM runtime (may include a safepoint transition
// in the shared encoding).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction (leaf: no safepoint/stack walk expected by
// callers of this node type; same runtime-call encoding is reused).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction (leaf, no FP state saved by the node contract;
// encoding is identical to CallLeafDirect).
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14787 
14788 // Tail Call; Jump from runtime stub to Java code.
14789 // Also known as an 'interprocedural jump'.
14790 // Target of jump will eventually return to caller.
14791 // TailJump below removes the return address.
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
// method_oop is not encoded; it is only constrained to the inline-cache
// register so the callee finds its Method* there.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding: ex_oop is constrained to r0
// where the handler expects the exception oop.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14817 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // size(0): purely a register-allocation constraint, emits nothing.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}


// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14862 
// Die now.  Emits a trapping instruction so execution cannot silently fall
// through a path C2 proved unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // Fixed typo: the assembler mnemonic is dcps1 (A64 DCPS1), not dpcs1.
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dcps1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
14878 
14879 // ============================================================================
14880 // Partial Subtype Check
14881 //
14882 // superklass array for an instance of the superklass.  Set a hidden
14883 // internal cache on a hit (cache is checked with exposed code in
14884 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14885 // encoding ALSO sets flags.
14886 
14887 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14888 %{
14889   match(Set result (PartialSubtypeCheck sub super));
14890   effect(KILL cr, KILL temp);
14891 
14892   ins_cost(1100);  // slightly larger than the next version
14893   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14894 
14895   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14896 
14897   opcode(0x1); // Force zero of result reg on hit
14898 
14899   ins_pipe(pipe_class_memory);
14900 %}
14901 
14902 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
14903 %{
14904   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
14905   effect(KILL temp, KILL result);
14906 
14907   ins_cost(1100);  // slightly larger than the next version
14908   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
14909 
14910   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14911 
14912   opcode(0x0); // Don't zero result reg on hit
14913 
14914   ins_pipe(pipe_class_memory);
14915 %}
14916 
// String.compareTo intrinsic, UTF-16 x UTF-16 (UU).  Same-width compares
// need no vector temps, so fnoreg is passed for all three.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 x Latin-1 (LL) variant.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 x Latin-1 (UL) variant; mixed widths need vector temps v0-v2
// for the inflation step inside string_compare.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 x UTF-16 (LU) variant; mirror of UL.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14991 
// String.indexOf intrinsic with a variable needle length (icnt2 = -1 tells
// the assembler routine the count is in a register).  UTF-16 x UTF-16.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 x Latin-1 variant of the variable-length indexOf.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 haystack x Latin-1 needle variant.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15054 
// indexOf with a compile-time-constant needle length (<= 4 chars); the
// constant is baked into the call so cnt2 is not needed as a register
// (zr passed instead) and fewer temps are required.  UTF-16 x UTF-16.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-needle (<= 4) Latin-1 x Latin-1 variant.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-needle UL variant; note the needle length is restricted to
// exactly 1 (immI_1) here, unlike the <= 4 UU/LL forms above.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15117 
// indexOf of a single char within a UTF-16 string (StrIndexOfChar node).
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15135 
// String.equals intrinsic, Latin-1 encoding (element size 1 byte).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 variant (element size 2 bytes).
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15167 
// Arrays.equals intrinsic for byte arrays (LL encoding, 1-byte elements).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fixed: second operand now printed as a register ($ary2, was literal "ary2").
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15184 
// Arrays.equals intrinsic for char arrays (UU encoding, 2-byte elements).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fixed: second operand now printed as a register ($ary2, was literal "ary2").
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15201 
// HasNegatives intrinsic: result set if any byte in ary1[0..len) has its
// sign bit set (used by Latin-1 encodability checks).
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15212 
// fast char[] to byte[] compression
// Uses vector temps v0-v3 for the SIMD narrowing loop; result reports
// success/failure of the compression (see char_array_compress).
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// fast byte[] to char[] inflation
// Universe dummy: node produces no value; matched for its copy side effect.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15246 
// encode char[] to byte[] in ISO_8859_1
// result is the number of characters successfully encoded (see
// MacroAssembler::encode_iso_array).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15265 
15266 // ============================================================================
15267 // This name is KNOWN by the ADLC and cannot be changed.
15268 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15269 // for this guy.
// ThreadLocal: the current-thread pointer already lives in the dedicated
// thread register (thread_RegP), so this emits no code at all.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15284 
15285 // ====================VECTOR INSTRUCTIONS=====================================
15286 
// 64-bit -> 64-bit vector reinterpret: in-place (dst == src operand), so
// no instruction is emitted.
instruct reinterpretD(vecD dst) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}

// 64-bit -> 128-bit reinterpret: copies the low 64 bits via ORR (vector
// move idiom) when dst and src are different registers.
instruct reinterpretD2X(vecX dst, vecD src) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 8);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}

// 128-bit -> 64-bit reinterpret: only the low 64 bits survive, moved via
// the same ORR idiom when registers differ.
instruct reinterpretX2D(vecD dst, vecX src) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 8 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret src));
  ins_cost(INSN_COST);
  format %{ " # reinterpret $dst,$src" %}
  ins_encode %{
    // If register is the same, then move is not needed.
    if (as_FloatRegister($dst$$reg) != as_FloatRegister($src$$reg)) {
      __ orr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    }
  %}
  ins_pipe(vlogical64);
%}

// 128-bit -> 128-bit reinterpret: in-place, no code emitted.
instruct reinterpretX(vecX dst) %{
  predicate(n->bottom_type()->is_vect()->length_in_bytes() == 16 &&
            n->in(1)->bottom_type()->is_vect()->length_in_bytes() == 16);
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ " # reinterpret $dst" %}
  ins_encode %{
    // empty
  %}
  ins_pipe(pipe_class_empty);
%}
15343 
// Load vector (16 bits)
// NOTE(review): this form takes a plain 'memory' operand while the wider
// loads use size-specific vmem4/8/16 operands — presumably because a 2-byte
// access has no stricter alignment/offset-scaling requirement; confirm.
instruct loadV2(vecD dst, memory mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 2);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrh   $dst,$mem\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_ldrvH(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (16 bits)
instruct storeV2(vecD src, memory mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 2);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strh   $mem,$src\t# vector (16 bits)" %}
  ins_encode( aarch64_enc_strvH(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15431 
// Replicate (broadcast) a scalar byte from a GPR into every byte lane of
// a 64-bit D register.  The predicate also accepts 4-lane byte vectors:
// dup fills all eight T8B lanes, and for a 4-lane vector only the low
// half is significant (presumably the upper lanes are ignored by 4-lane
// consumers -- the matcher places both sizes in a vecD).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 16-lane byte broadcast into a full 128-bit Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate byte broadcast: materialize the constant in every lane with
// movi (via MacroAssembler::mov).  The constant is masked to the 8-bit
// element width before encoding.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 16-lane immediate byte broadcast (128-bit form of the rule above).
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15481 
// Broadcast a scalar short (16-bit) from a GPR into the half-word lanes
// of a D register.  As with replicate8B, the 2-lane case shares the
// 4-lane T4H form; only the low lanes are significant for length 2.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 8-lane short broadcast into a full 128-bit Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate short broadcast; constant masked to the 16-bit element width.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 8-lane immediate short broadcast (128-bit form).
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15531 
// Broadcast a scalar int from a GPR into both 32-bit lanes of a D register.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 4-lane int broadcast into a full 128-bit Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate int broadcast.  No masking is needed here: the constant is
// already 32 bits, the full element width.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// 4-lane immediate int broadcast (128-bit form).
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15579 
// Broadcast a scalar long from a GPR into both 64-bit lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero a 128-bit vector register without materializing a constant:
// eor of a register with itself yields all-zero bits, and an all-zero
// pattern is identical for every element type.
// NOTE(review): the rule is named 2L but matches (ReplicateI zero) with
// length 2, and the format comments it as "vector(4I)" -- this looks like
// a deliberate reuse of the type-agnostic zero pattern, but confirm the
// ideal graph really presents the 2L-zero case as ReplicateI here.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // dst ^ dst == 0 for all 128 bits; no immediate needed.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15605 
// Broadcast a scalar float into both 32-bit lanes of a D register.
// Unlike the integer replicates, the source is already an FP register,
// so dup copies from a SIMD element rather than from a GPR.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// 4-lane float broadcast into a full 128-bit Q register.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Broadcast a scalar double into both 64-bit lanes of a Q register.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15644 
// ====================REDUCTION ARITHMETIC====================================
//
// Reduction rules combine a scalar carried-in value (src1) with all lanes
// of a vector (src2) into a scalar result.  The ideal AddReductionVI node
// is typed int, so sub-word (byte/short) reductions are disambiguated by
// the element basic type of the vector input; the result is re-narrowed
// with sxtb/sxth to keep the value in the element's signed range.

// dst = src1 + sum of 8 byte lanes of src2.
instruct reduce_add8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  // TEMP_DEF: dst is also used as a scratch register, so it must not be
  // allocated to the same register as any input.
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8B, $src2\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxtb  $dst, $dst\t add reduction8B"
  %}
  ins_encode %{
    // addv sums all lanes into element 0 of tmp; umov (unsigned move)
    // extracts byte 0 into the GPR; addw folds in the scalar; sxtb
    // re-sign-extends the byte result.
    __ addv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = src1 + sum of 16 byte lanes of src2 (128-bit form).
instruct reduce_add16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T16B, $src2\n\t"
            "umov  $dst, $tmp, B, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxtb  $dst, $dst\t add reduction16B"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = src1 + sum of 4 short lanes of src2; result re-narrowed with sxth.
instruct reduce_add4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T4H, $src2\n\t"
            "umov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxth  $dst, $dst\t add reduction4S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = src1 + sum of 8 short lanes of src2 (128-bit form).
instruct reduce_add8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "addv  $tmp, T8H, $src2\n\t"
            "umov  $dst, $tmp, H, 0\n\t"
            "addw  $dst, $dst, $src1\n\t"
            "sxth  $dst, $dst\t add reduction8S"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ addw($dst$$Register, $dst$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15726 
// dst = src1 + both int lanes of src2.  With only two lanes it is cheaper
// to extract each lane into a GPR and add there than to use addv.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $tmp, $src1, $tmp\n\t"
            "addw  $dst, $tmp, $tmp2\t add reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($tmp$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $tmp$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// dst = src1 + sum of 4 int lanes of src2: addv folds the vector, then a
// single GPR add mixes in the scalar.  No narrowing needed for 32-bit lanes.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4I"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15765 
// Multiply-reduce 8 byte lanes into a scalar.  There is no "mulv across
// lanes" instruction, so the vector is folded by repeated halving: ins
// copies the upper half of the live lanes alongside the lower half, mulv
// multiplies element-wise, until two byte lanes remain; those are moved
// to GPRs and multiplied in, with sxtb re-narrowing after each GPR multiply.
instruct reduce_mul8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD vtmp1, vecD vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, S, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $src2\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t mul reduction8B"
  %}
  ins_encode %{
    // Fold 8 lanes -> 4: vtmp1[S0] = src2[S1], then lanewise multiply.
    __ ins(as_FloatRegister($vtmp1$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // Fold 4 lanes -> 2.
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Final two byte lanes handled in GPRs, mixing in the scalar src1.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// 16-lane byte multiply-reduce: one extra halving step (D-sized ins)
// folds 16 lanes down to 8 before following the same scheme as above.
instruct reduce_mul16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp1, $src2\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp1, T8B, $vtmp2, $vtmp1\n\t"
            "ins   $vtmp2, H, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T8B, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, B, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxtb  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, B, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxtb  $dst, $dst\t mul reduction16B"
  %}
  ins_encode %{
    // 16 -> 8 lanes.
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // 8 -> 4 lanes.
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // 4 -> 2 lanes.
    __ ins(as_FloatRegister($vtmp2$$reg), __ H,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T8B,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Final two lanes plus the scalar, in GPRs.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ B, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15843 
// Multiply-reduce 4 short lanes: one halving fold (4 -> 2 lanes), then the
// last two lanes and the scalar src1 are multiplied in GPRs, with sxth
// re-narrowing the 16-bit result after each multiply.
instruct reduce_mul4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD vtmp, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp, TEMP itmp);
  format %{ "ins   $vtmp, S, $src2, 0, 1\n\t"
            "mulv  $vtmp, T4H, $vtmp, $src2\n\t"
            "umov  $itmp, $vtmp, H, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t mul reduction4S"
  %}
  ins_encode %{
    // 4 -> 2 lanes.
    __ ins(as_FloatRegister($vtmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp$$reg), __ T4H,
            as_FloatRegister($vtmp$$reg), as_FloatRegister($src2$$reg));
    // Final two half-word lanes in GPRs.
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce 8 short lanes: two halving folds (8 -> 4 -> 2), then the
// same GPR tail as the 4S rule.
instruct reduce_mul8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX vtmp1, vecX vtmp2, iRegINoSp itmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP vtmp1, TEMP vtmp2, TEMP itmp);
  format %{ "ins   $vtmp1, D, $src2, 0, 1\n\t"
            "mulv  $vtmp1, T4H, $vtmp1, $src2\n\t"
            "ins   $vtmp2, S, $vtmp1, 0, 1\n\t"
            "mulv  $vtmp2, T4H, $vtmp2, $vtmp1\n\t"
            "umov  $itmp, $vtmp2, H, 0\n\t"
            "mulw  $dst, $itmp, $src1\n\t"
            "sxth  $dst, $dst\n\t"
            "umov  $itmp, $vtmp2, H, 1\n\t"
            "mulw  $dst, $itmp, $dst\n\t"
            "sxth  $dst, $dst\t mul reduction8S"
  %}
  ins_encode %{
    // 8 -> 4 lanes.
    __ ins(as_FloatRegister($vtmp1$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp1$$reg), __ T4H,
            as_FloatRegister($vtmp1$$reg), as_FloatRegister($src2$$reg));
    // 4 -> 2 lanes.
    __ ins(as_FloatRegister($vtmp2$$reg), __ S,
           as_FloatRegister($vtmp1$$reg), 0, 1);
    __ mulv(as_FloatRegister($vtmp2$$reg), __ T4H,
            as_FloatRegister($vtmp2$$reg), as_FloatRegister($vtmp1$$reg));
    // Final two lanes plus the scalar, in GPRs.
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 0);
    __ mulw($dst$$Register, $itmp$$Register, $src1$$Register);
    __ sxth($dst$$Register, $dst$$Register);
    __ umov($itmp$$Register, as_FloatRegister($vtmp2$$reg), __ H, 1);
    __ mulw($dst$$Register, $itmp$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15909 
// Multiply-reduce 2 int lanes: extract both lanes into a GPR in turn and
// multiply there together with the scalar src1.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2I\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce 4 int lanes: one halving fold (ins + mulv on T2S takes
// 4 lanes to 2), then the final two lanes and src1 are multiplied in GPRs.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, D, $src2, 0, 1\n\t"
            "mulv  $tmp, T2S, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4I\n\t"
  %}
  ins_encode %{
    // 4 -> 2 lanes: tmp[D0] = src2[D1], then lanewise multiply.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
            as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15955 
// Float add reduction over 2 lanes.  Lanes are accumulated one at a time
// in index order (src1, then lane 0, lane 1, ...) rather than with a tree
// reduction -- presumably to preserve strict left-to-right FP evaluation
// order; confirm against the ideal-graph contract for AddReductionVF.
// Scalar fadds reads lane 0 of its vector operands, so each successive
// lane is first moved into element 0 of tmp with ins.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2F"
  %}
  ins_encode %{
    // dst = src1 + src2[0]
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // dst += src2[1]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Float add reduction over 4 lanes, same ordered-accumulation scheme.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4F"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Float multiply reduction over 2 lanes; mirrors reduce_add2F with fmuls.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Float multiply reduction over 4 lanes; mirrors reduce_add4F with fmuls.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4F"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16059 
// Long add reduction over 2 lanes: move each 64-bit lane to a GPR with
// umov and accumulate with 64-bit adds (src1 first, then lane 1).
instruct reduce_add2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  match(Set dst (AddReductionVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "add   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "add   $dst, $dst, $tmp\t add reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ add($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ add($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Long multiply reduction over 2 lanes; same lane-extraction scheme with
// 64-bit mul in GPRs.
instruct reduce_mul2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  match(Set dst (MulReductionVL src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "mul   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "mul   $dst, $dst, $tmp\t mul reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ mul($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ mul($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16097 
// Double add reduction over 2 lanes.  Like the float reductions, lanes are
// accumulated in index order: dst = src1 + src2[0], then lane 1 is moved
// into element 0 of tmp with ins and added.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2D"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Double multiply reduction over 2 lanes; mirrors reduce_add2D with fmuld.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2D"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16137 
// Bitwise-AND reduction of 8 byte lanes.  The 64-bit vector is read as two
// 32-bit halves into GPRs and ANDed together; the remaining fold happens
// inside one register using shifted-operand ANDs (LSR #16, then #8 narrows
// the live result down to one byte), then src1 is ANDed in and the byte is
// re-sign-extended.
instruct reduce_and8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t and reduction8B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Bitwise-AND reduction of 16 byte lanes.  Same scheme as reduce_and8B but
// starting from two 64-bit halves (umov D, 64-bit andr, then an extra
// LSR #32 fold before dropping to 32-bit andw operations).
instruct reduce_and16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "andr   $dst, $dst, $tmp\n\t"
            "andr   $dst, $dst, $dst, LSR #32\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $dst, $dst, LSR #8\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t and reduction16B"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Bitwise-AND reduction of 4 short lanes: two 32-bit halves ANDed, one
// LSR #16 fold to a single half-word, AND with src1, sxth re-narrow.
instruct reduce_and4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AndReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "andw   $dst, $dst, $tmp\n\t"
            "andw   $dst, $dst, $dst, LSR #16\n\t"
            "andw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t and reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ andw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ andw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16215 
16216 instruct reduce_and8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
16217 %{
16218   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
16219   match(Set dst (AndReductionV src1 src2));
16220   ins_cost(INSN_COST);
16221   effect(TEMP_DEF dst, TEMP tmp);
16222   format %{ "umov   $tmp, $src2, D, 0\n\t"
16223             "umov   $dst, $src2, D, 1\n\t"
16224             "andr   $dst, $dst, $tmp\n\t"
16225             "andr   $dst, $dst, $dst, LSR #32\n\t"
16226             "andw   $dst, $dst, $dst, LSR #16\n\t"
16227             "andw   $dst, $src1, $dst\n\t"
16228             "sxth   $dst, $dst\t and reduction8S"
16229   %}
16230   ins_encode %{
16231     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
16232     __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
16233     __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
16234     __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
16235     __ andw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
16236     __ andw($dst$$Register, $src1$$Register, $dst$$Register);
16237     __ sxth($dst$$Register, $dst$$Register);
16238   %}
16239   ins_pipe(pipe_class_default);
16240 %}
16241 
16242 instruct reduce_and2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
16243 %{
16244   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
16245   match(Set dst (AndReductionV src1 src2));
16246   ins_cost(INSN_COST);
16247   effect(TEMP_DEF dst, TEMP tmp);
16248   format %{ "umov  $tmp, $src2, S, 0\n\t"
16249             "andw  $dst, $tmp, $src1\n\t"
16250             "umov  $tmp, $src2, S, 1\n\t"
16251             "andw  $dst, $tmp, $dst\t and reduction2I"
16252   %}
16253   ins_encode %{
16254     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
16255     __ andw($dst$$Register, $tmp$$Register, $src1$$Register);
16256     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
16257     __ andw($dst$$Register, $tmp$$Register, $dst$$Register);
16258   %}
16259   ins_pipe(pipe_class_default);
16260 %}
16261 
16262 instruct reduce_and4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
16263 %{
16264   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
16265   match(Set dst (AndReductionV src1 src2));
16266   ins_cost(INSN_COST);
16267   effect(TEMP_DEF dst, TEMP tmp);
16268   format %{ "umov   $tmp, $src2, D, 0\n\t"
16269             "umov   $dst, $src2, D, 1\n\t"
16270             "andr   $dst, $dst, $tmp\n\t"
16271             "andr   $dst, $dst, $dst, LSR #32\n\t"
16272             "andw   $dst, $src1, $dst\t and reduction4I"
16273   %}
16274   ins_encode %{
16275     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
16276     __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
16277     __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
16278     __ andr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
16279     __ andw($dst$$Register, $src1$$Register, $dst$$Register);
16280   %}
16281   ins_pipe(pipe_class_default);
16282 %}
16283 
16284 instruct reduce_and2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
16285 %{
16286   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
16287   match(Set dst (AndReductionV src1 src2));
16288   ins_cost(INSN_COST);
16289   effect(TEMP_DEF dst, TEMP tmp);
16290   format %{ "umov  $tmp, $src2, D, 0\n\t"
16291             "andr  $dst, $src1, $tmp\n\t"
16292             "umov  $tmp, $src2, D, 1\n\t"
16293             "andr  $dst, $dst, $tmp\t and reduction2L"
16294   %}
16295   ins_encode %{
16296     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
16297     __ andr($dst$$Register, $src1$$Register, $tmp$$Register);
16298     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
16299     __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
16300   %}
16301   ins_pipe(pipe_class_default);
16302 %}
16303 
// ---- Integer OR reductions -------------------------------------------------
// OrReductionV folds every lane of vector src2 together with scalar src1
// using bitwise OR.  Same lane-folding strategy as the AND reductions above:
// umov halves into GPRs, fold with shifted ORs, combine with src1, and
// sign-extend sub-word (byte/short) results.

// OR reduction of a 64-bit vector of 8 bytes.
instruct reduce_or8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t or reduction8B"
  %}
  ins_encode %{
    // Fold the two 32-bit halves, then 32->16->8 bits via shifted ORs,
    // OR with src1, sign-extend the byte result.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR reduction of a 128-bit vector of 16 bytes.
instruct reduce_or16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $dst, $dst, LSR #8\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t or reduction16B"
  %}
  ins_encode %{
    // Fold 128->64 bits (64-bit orr), then 64->32->16->8, OR with src1,
    // sign-extend the byte result.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR reduction of a 64-bit vector of 4 shorts.
instruct reduce_or4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "orrw   $dst, $dst, $tmp\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t or reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR reduction of a 128-bit vector of 8 shorts.
instruct reduce_or8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $dst, $dst, LSR #16\n\t"
            "orrw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t or reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR reduction of a 64-bit vector of 2 ints: extract each lane and OR directly.
instruct reduce_or2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "orrw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "orrw  $dst, $tmp, $dst\t or reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ orrw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ orrw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR reduction of a 128-bit vector of 4 ints.
instruct reduce_or4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "orr    $dst, $dst, $tmp\n\t"
            "orr    $dst, $dst, $dst, LSR #32\n\t"
            "orrw   $dst, $src1, $dst\t or reduction4I"
  %}
  ins_encode %{
    // Fold 128->64->32 bits, then OR with the scalar input.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr($dst$$Register, $dst$$Register, $tmp$$Register);
    __ orr($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ orrw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OR reduction of a 128-bit vector of 2 longs.
instruct reduce_or2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (OrReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "orr   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "orr   $dst, $dst, $tmp\t or reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ orr($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ orr($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16469 
// ---- Integer XOR reductions ------------------------------------------------
// XorReductionV folds every lane of vector src2 together with scalar src1
// using bitwise XOR (eor).  Same lane-folding strategy as the AND/OR
// reductions above: umov halves into GPRs, fold with shifted EORs, combine
// with src1, and sign-extend sub-word (byte/short) results.

// XOR reduction of a 64-bit vector of 8 bytes.
instruct reduce_eor8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t xor reduction8B"
  %}
  ins_encode %{
    // Fold the two 32-bit halves, then 32->16->8 bits via shifted EORs,
    // XOR with src1, sign-extend the byte result.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR reduction of a 128-bit vector of 16 bytes.
instruct reduce_eor16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $dst, $dst, LSR #8\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxtb   $dst, $dst\t xor reduction16B"
  %}
  ins_encode %{
    // Fold 128->64 bits (64-bit eor), then 64->32->16->8, XOR with src1,
    // sign-extend the byte result.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 8);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxtb($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR reduction of a 64-bit vector of 4 shorts.
instruct reduce_eor4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, S, 0\n\t"
            "umov   $dst, $src2, S, 1\n\t"
            "eorw   $dst, $dst, $tmp\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t xor reduction4S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR reduction of a 128-bit vector of 8 shorts.
instruct reduce_eor8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $dst, $dst, LSR #16\n\t"
            "eorw   $dst, $src1, $dst\n\t"
            "sxth   $dst, $dst\t xor reduction8S"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 16);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
    __ sxth($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR reduction of a 64-bit vector of 2 ints: extract each lane and XOR directly.
instruct reduce_eor2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "eorw  $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "eorw  $dst, $tmp, $dst\t xor reduction2I"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ eorw($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ eorw($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR reduction of a 128-bit vector of 4 ints.
instruct reduce_eor4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, iRegINoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov   $tmp, $src2, D, 0\n\t"
            "umov   $dst, $src2, D, 1\n\t"
            "eor    $dst, $dst, $tmp\n\t"
            "eor    $dst, $dst, $dst, LSR #32\n\t"
            "eorw   $dst, $src1, $dst\t xor reduction4I"
  %}
  ins_encode %{
    // Fold 128->64->32 bits, then XOR with the scalar input.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ umov($dst$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor($dst$$Register, $dst$$Register, $tmp$$Register);
    __ eor($dst$$Register, $dst$$Register, $dst$$Register, Assembler::LSR, 32);
    __ eorw($dst$$Register, $src1$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XOR reduction of a 128-bit vector of 2 longs.
instruct reduce_eor2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp)
%{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (XorReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "eor   $dst, $src1, $tmp\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "eor   $dst, $dst, $tmp\t xor reduction2L"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ eor($dst$$Register, $src1$$Register, $tmp$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ eor($dst$$Register, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16635 
// ---- Integer MAX reductions (byte/short) -----------------------------------
// MaxReductionV: the signed across-lanes maximum of vector src2 is computed
// with smaxv into a vector temp, moved to dst with sign-extension (smov),
// then compared against scalar src1; cselw keeps dst when dst > src1.
// KILL cr: the cmpw clobbers the condition flags.

// Signed max reduction of a 64-bit vector of 8 bytes.
instruct reduce_max8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction8B" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    // smov sign-extends the byte lane into the GPR.
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Signed max reduction of a 128-bit vector of 16 bytes.
instruct reduce_max16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction16B" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Signed max reduction of a 64-bit vector of 4 shorts.
instruct reduce_max4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction4S" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
16689 
// Signed max reduction of a 128-bit vector of 8 shorts: across-lanes smaxv
// into a vector temp, sign-extending smov into dst, then cselw keeps dst
// when dst > src1.  KILL cr: cmpw clobbers the condition flags.
// (Format string normalized: single space after "smaxv" to match the
// sibling reduce_max*/reduce_min* formats; emitted code is unchanged.)
instruct reduce_max8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T8H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction8S" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
16707 
// Signed max reduction of a 64-bit vector of 2 ints.  smaxv requires a
// full-width T4S operand, so the 64-bit source is first duplicated into both
// halves of a 128-bit temp (dup T2D) — the duplicate lanes cannot change the
// maximum.  umov (not smov) suffices for the S lane since the full 32 bits
// are moved.
instruct reduce_max2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $src2\n\t"
            "smaxv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction2I" %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($src2$$reg));
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}

// Signed max reduction of a 128-bit vector of 4 ints.
instruct reduce_max4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "smaxv $tmp, T4S, $src2\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 gt\t max reduction4I" %}
  ins_encode %{
    __ smaxv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
16745 
// Signed max reduction of a 128-bit vector of 2 longs.  No across-lanes
// SIMD max exists for 64-bit lanes, so each lane is extracted with umov and
// compared/selected in the GPRs: first src1 vs lane 0, then that result vs
// lane 1.  KILL cr: both cmp instructions clobber the condition flags.
// (Format string normalized: space after the comma in "cmp $src1, $tmp" to
// match the other format lines; emitted code is unchanged.)
instruct reduce_max2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "cmp   $src1, $tmp\n\t"
            "csel  $dst, $src1, $tmp gt\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp gt\t max reduction2L" %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ cmp(as_Register($src1$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($tmp$$reg), Assembler::GT);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::GT);
  %}
  ins_pipe(pipe_class_default);
%}
16767 
// ---- Floating-point MAX reductions -----------------------------------------
// These stay entirely in FP/SIMD registers (no flags killed): fmaxs/fmaxd
// handle the pairwise combines; fmaxv does a full across-lanes max where the
// lane shape allows it (T4S only).

// Max reduction of a 64-bit vector of 2 floats: src1 max lane0, then max
// lane1 (moved to tmp via ins).
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Copy lane 1 of src2 into lane 0 of tmp so fmaxs can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of a 128-bit vector of 4 floats: across-lanes fmaxv, then
// fold in the scalar src1.
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Max reduction of a 128-bit vector of 2 doubles: src1 max lane0, then max
// lane1 (moved to tmp via ins); fmaxv has no 64-bit lane form.
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Copy lane 1 of src2 into lane 0 of tmp so fmaxd can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16813 
// ---- Integer MIN reductions (byte/short) -----------------------------------
// Mirror of the MAX reductions above: sminv computes the signed across-lanes
// minimum, smov sign-extends the lane into dst, and cselw keeps dst when
// dst < src1 (LT instead of GT).  KILL cr: cmpw clobbers the flags.

// Signed min reduction of a 64-bit vector of 8 bytes.
instruct reduce_min8B(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T8B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction8B" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T8B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Signed min reduction of a 128-bit vector of 16 bytes.
instruct reduce_min16B(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T16B, $src2\n\t"
            "smov  $dst, $tmp, B, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction16B" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T16B, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ B, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Signed min reduction of a 64-bit vector of 4 shorts.
instruct reduce_min4S(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecD tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4H, $src2\n\t"
            "smov  $dst, $tmp, H, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction4S" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4H, as_FloatRegister($src2$$reg));
    __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
16867 
16868 instruct reduce_min8S(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
16869   predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
16870   match(Set dst (MinReductionV src1 src2));
16871   ins_cost(INSN_COST);
16872   effect(TEMP_DEF dst, TEMP tmp, KILL cr);
16873   format %{ "sminv $tmp, T8H, $src2\n\t"
16874             "smov  $dst, $tmp, H, 0\n\t"
16875             "cmpw  $dst, $src1\n\t"
16876             "cselw $dst, $dst, $src1 lt\t min reduction8S" %}
16877   ins_encode %{
16878     __ sminv(as_FloatRegister($tmp$$reg), __ T8H, as_FloatRegister($src2$$reg));
16879     __ smov($dst$$Register, as_FloatRegister($tmp$$reg), __ H, 0);
16880     __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
16881     __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
16882   %}
16883   ins_pipe(pipe_class_default);
16884 %}
16885 
// Min reduction of a 64-bit vector of 2 ints.  The 64-bit src2 is
// duplicated into both halves of the 128-bit tmp so that a single T4S
// sminv over [a,b,a,b] yields min(a,b) in lane 0; the scalar accumulator
// src1 is then folded in with cmpw/cselw.
// Fix: the format string previously said "sminv $tmp, T2S" while the
// encode emits a T4S sminv — the PrintAssembly debug listing now matches
// the generated code.
instruct reduce_min2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "dup   $tmp, T2D, $src2\n\t"
            "sminv $tmp, T4S, $tmp\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction2I" %}
  ins_encode %{
    __ dup(as_FloatRegister($tmp$$reg), __ T2D, as_FloatRegister($src2$$reg));
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($tmp$$reg));
    // umov is fine for 32-bit lanes: the full word is moved, no extension
    // of a narrower lane is involved.
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
16905 
// Min reduction of a 128-bit vector of 4 ints: across-lanes sminv, then
// fold in the scalar accumulator with cmpw/cselw.
instruct reduce_min4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "sminv $tmp, T4S, $src2\n\t"
            "umov  $dst, $tmp, S, 0\n\t"
            "cmpw  $dst, $src1\n\t"
            "cselw $dst, $dst, $src1 lt\t min reduction4I" %}
  ins_encode %{
    __ sminv(as_FloatRegister($tmp$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ umov($dst$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ cmpw(as_Register($dst$$reg), as_Register($src1$$reg));
    __ cselw(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($src1$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of a 128-bit vector of 2 longs.  There is no across-lanes
// min for 64-bit lanes, so each D lane is extracted with umov and folded
// into the running minimum with cmp/csel (scalar src1 first, then lane 1).
instruct reduce_min2L(iRegLNoSp dst, iRegL src1, vecX src2, iRegLNoSp tmp, rFlagsReg cr) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp, KILL cr);
  format %{ "umov  $tmp, $src2, D, 0\n\t"
            "cmp   $src1,$tmp\n\t"
            "csel  $dst, $src1, $tmp lt\n\t"
            "umov  $tmp, $src2, D, 1\n\t"
            "cmp   $dst, $tmp\n\t"
            "csel  $dst, $dst, $tmp lt\t min reduction2L" %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 0);
    __ cmp(as_Register($src1$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($tmp$$reg), Assembler::LT);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ D, 1);
    __ cmp(as_Register($dst$$reg), as_Register($tmp$$reg));
    __ csel(as_Register($dst$$reg), as_Register($dst$$reg), as_Register($tmp$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of a 64-bit vector of 2 floats: fmins against lane 0,
// then ins copies lane 1 of src2 into tmp's lane 0 so a second fmins can
// fold it in.
instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of a 128-bit vector of 4 floats: across-lanes fminv, then
// a scalar fmins folds in the accumulator.
instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $src2\n\t"
            "fmins $dst, $dst, $src1\t min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Min reduction of a 128-bit vector of 2 doubles: same shape as
// reduce_min2F but with D lanes and fmind.
instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16991 
16992 // ====================VECTOR ARITHMETIC=======================================
16993 
16994 // --------------------------------- ADD --------------------------------------
16995 
// Vector integer add, 64-bit register.  The length 4||8 predicate lets
// the same 8B encoding serve 4-byte vectors too (upper lanes are don't-care).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add of 16 bytes, 128-bit register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add of 4 shorts (also covers length-2 short vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add of 8 shorts, 128-bit register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add of 2 ints, 64-bit register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add of 4 ints, 128-bit register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add of 2 longs, 128-bit register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add of 2 floats, 64-bit register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector add of 4 floats, 128-bit register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17123 
// Vector add of 2 doubles, 128-bit register.
// Fix: the predicate was missing — every sibling 2D rule (vsub2D, vmul2D,
// vdiv2D) guards on length() == 2, and without it this rule could match
// AddVD nodes of any vector length.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17136 
17137 // --------------------------------- SUB --------------------------------------
17138 
// Vector integer subtract, 64-bit register (length 4||8 shares the 8B form).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract of 16 bytes, 128-bit register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract of 4 shorts (also covers length-2 short vectors).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract of 8 shorts, 128-bit register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract of 2 ints, 64-bit register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract of 4 ints, 128-bit register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract of 2 longs, 128-bit register.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract of 2 floats, 64-bit register.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector subtract of 4 floats, 128-bit register.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector subtract of 2 doubles, 128-bit register.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17280 
17281 // --------------------------------- MUL --------------------------------------
17282 
// Vector integer multiply, 64-bit register (length 4||8 shares the 8B form).
instruct vmul8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply of 16 bytes, 128-bit register.
instruct vmul16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (MulVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply of 4 shorts (also covers length-2 short vectors).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply of 8 shorts, 128-bit register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply of 2 ints, 64-bit register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply of 4 ints, 128-bit register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply of 2 floats, 64-bit register.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector multiply of 4 floats, 128-bit register.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector multiply of 2 doubles, 128-bit register.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17410 
17411 // --------------------------------- MLA --------------------------------------
17412 
// Integer multiply-accumulate: matches dst = dst + src1*src2 and emits a
// single mla instruction (dst is both input and output).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate of 8 shorts, 128-bit register.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-accumulate of 2 ints, 64-bit register.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-accumulate of 4 ints, 128-bit register.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// Fused multiply-add of 2 floats; only matched when UseFMA allows fused
// (single-rounding) semantics.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
// Fused multiply-add of 4 floats, 128-bit register.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
// Fused multiply-add of 2 doubles, 128-bit register.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17511 
17512 // --------------------------------- MLS --------------------------------------
17513 
// Integer multiply-subtract: matches dst = dst - src1*src2 and emits a
// single mls instruction (dst is both input and output).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract of 8 shorts, 128-bit register.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Multiply-subtract of 2 ints, 64-bit register.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Multiply-subtract of 4 ints, 128-bit register.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// Fused multiply-subtract of 2 floats: either FMA operand may carry the
// negation, hence the two match rules.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// Fused multiply-subtract of 4 floats, 128-bit register.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// Fused multiply-subtract of 2 doubles, 128-bit register.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17615 
17616 // --------------------------------- DIV --------------------------------------
17617 
// Vector divide of 2 floats, 64-bit register.  Only FP division exists;
// integer vector division has no NEON instruction and is not matched here.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector divide of 4 floats, 128-bit register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector divide of 2 doubles, 128-bit register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
17659 
17660 // --------------------------------- SQRT -------------------------------------
17661 
// Vector square root of 2 doubles, 128-bit register.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
17673 
17674 // --------------------------------- ABS --------------------------------------
17675 
// Vector absolute value.  AbsV does not encode the lane type in the node
// opcode, so each rule's predicate checks both the vector length and the
// element basic type to pick the right arrangement.
instruct vabs8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AbsV src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (8B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Absolute value of 16 bytes, 128-bit register.
instruct vabs16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (AbsV src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Absolute value of 4 shorts, 64-bit register.
instruct vabs4S(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AbsV src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (4H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Absolute value of 8 shorts, 128-bit register.
instruct vabs8S(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (AbsV src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (8H)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}

// Absolute value of 2 ints, 64-bit register.
instruct vabs2I(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (AbsV src));
  ins_cost(INSN_COST);
  format %{ "abs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ absr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}
17740 
17741 instruct vabs4I(vecX dst, vecX src)
17742 %{
17743   predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
17744   match(Set dst (AbsV src));
17745   ins_cost(INSN_COST);
17746   format %{ "abs  $dst,$src\t# vector (4S)" %}
17747   ins_encode %{
17748     __ absr(as_FloatRegister($dst$$reg), __ T4S,
17749             as_FloatRegister($src$$reg));
17750   %}
17751   ins_pipe(vlogical128);
17752 %}
17753 
17754 instruct vabs2L(vecX dst, vecX src)
17755 %{
17756   predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
17757   match(Set dst (AbsV src));
17758   ins_cost(INSN_COST);
17759   format %{ "abs  $dst,$src\t# vector (2D)" %}
17760   ins_encode %{
17761     __ absr(as_FloatRegister($dst$$reg), __ T2D,
17762             as_FloatRegister($src$$reg));
17763   %}
17764   ins_pipe(vlogical128);
17765 %}
17766 
17767 instruct vabs2F(vecD dst, vecD src)
17768 %{
17769   predicate(n->as_Vector()->length() == 2);
17770   match(Set dst (AbsVF src));
17771   ins_cost(INSN_COST * 3);
17772   format %{ "fabs  $dst,$src\t# vector (2S)" %}
17773   ins_encode %{
17774     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
17775             as_FloatRegister($src$$reg));
17776   %}
17777   ins_pipe(vunop_fp64);
17778 %}
17779 
17780 instruct vabs4F(vecX dst, vecX src)
17781 %{
17782   predicate(n->as_Vector()->length() == 4);
17783   match(Set dst (AbsVF src));
17784   ins_cost(INSN_COST * 3);
17785   format %{ "fabs  $dst,$src\t# vector (4S)" %}
17786   ins_encode %{
17787     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
17788             as_FloatRegister($src$$reg));
17789   %}
17790   ins_pipe(vunop_fp128);
17791 %}
17792 
17793 instruct vabs2D(vecX dst, vecX src)
17794 %{
17795   predicate(n->as_Vector()->length() == 2);
17796   match(Set dst (AbsVD src));
17797   ins_cost(INSN_COST * 3);
17798   format %{ "fabs  $dst,$src\t# vector (2D)" %}
17799   ins_encode %{
17800     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
17801             as_FloatRegister($src$$reg));
17802   %}
17803   ins_pipe(vunop_fp128);
17804 %}
17805 
17806 // --------------------------------- NEG --------------------------------------
17807 
// FP vector negate, two float lanes (fneg T2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector negate, four float lanes (fneg T4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP vector negate, two double lanes (fneg T2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
17846 
17847 // --------------------------------- NOT --------------------------------------
17848 
// Bitwise NOT, 64-bit vector.  Predicate keys on total byte length, not lane
// type, since NOT is lane-size agnostic.
instruct vnot8B(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (NotV src));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (8B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise NOT, 128-bit vector.
instruct vnot16B(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (NotV src));
  ins_cost(INSN_COST);
  format %{ "not  $dst,$src\t# vector (16B)" %}
  ins_encode %{
    __ notr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vlogical128);
%}
17874 
17875 // --------------------------------- AND --------------------------------------
17876 
// Bitwise AND, 64-bit-or-smaller vector.  The predicate also accepts 4-byte
// vectors, which are carried in the low half of a vecD.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND, 128-bit vector.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17905 
17906 // --------------------------------- OR ---------------------------------------
17907 
// Bitwise OR, 64-bit-or-smaller vector.  The predicate also accepts 4-byte
// vectors, which are carried in the low half of a vecD.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Format fixed from "and" (copy/paste from vand8B): this rule emits orr.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17922 
// Bitwise OR, 128-bit vector.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17936 
17937 // --------------------------------- XOR --------------------------------------
17938 
// Bitwise XOR (NEON eor), 64-bit-or-smaller vector; 4-byte vectors ride in the
// low half of a vecD.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR (NEON eor), 128-bit vector.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17967 
17968 // ------------------------------ Max ---------------------------------------
17969 
// Signed integer vector max, eight byte lanes (NEON smax via maxv helper).
instruct vmax8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed integer vector max, sixteen byte lanes.
instruct vmax16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed integer vector max, four short lanes.
instruct vmax4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed integer vector max, eight short lanes.
instruct vmax8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed integer vector max, two int lanes.
instruct vmax2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed integer vector max, four int lanes.
instruct vmax4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "maxv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ maxv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18053 
// Signed vector max, two 64-bit long lanes.  NEON has no 64-bit smax, so
// synthesize it: cmgt builds a per-lane all-ones/all-zeros mask, bsl selects.
instruct vmax2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  // TEMP: dst holds the intermediate mask, so it must not alias an input.
  effect(TEMP dst);
  // Added the missing "\n\t" separator so the two instructions print on
  // separate lines in PrintOptoAssembly output.
  format %{ "cmgt  $dst,$src1,$src2\t# vector (2D)\n\t"
            "bsl  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    // mask = (src1 > src2) per 64-bit lane
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
    // dst = mask ? src1 : src2
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src1$$reg),
           as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18072 
// FP vector max, two float lanes (fmax T2S).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// FP vector max, four float lanes (fmax T4S).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// FP vector max, two double lanes (fmax T2D).
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18114 
18115 // ------------------------------ Min ---------------------------------------
18116 
// Signed integer vector min, eight byte lanes (NEON smin via minv helper).
instruct vmin8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed integer vector min, sixteen byte lanes.
instruct vmin16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed integer vector min, four short lanes.
instruct vmin4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed integer vector min, eight short lanes.
instruct vmin8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Signed integer vector min, two int lanes.
instruct vmin2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Signed integer vector min, four int lanes.
instruct vmin4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "minv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ minv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
18200 
// Signed vector min, two 64-bit long lanes.  NEON has no 64-bit smin, so
// synthesize it: cmgt builds the (src1 > src2) mask, bsl selects the
// operands in the opposite order from vmax2L.
instruct vmin2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  // TEMP: dst holds the intermediate mask, so it must not alias an input.
  effect(TEMP dst);
  // Added the missing "\n\t" separator so the two instructions print on
  // separate lines in PrintOptoAssembly output.
  format %{ "cmgt  $dst,$src1,$src2\t# vector (2D)\n\t"
            "bsl  $dst,$src2,$src1\t# vector (16B)" %}
  ins_encode %{
    // mask = (src1 > src2) per 64-bit lane
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
    // dst = mask ? src2 : src1  (i.e. the smaller operand)
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg),
           as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}
18219 
// FP vector min, two float lanes (fmin T2S).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// FP vector min, four float lanes (fmin T4S).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// FP vector min, two double lanes (fmin T2D).
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18261 
18262 // ------------------------------ Comparison ---------------------------------
18263 
// VectorMaskCmp eq, eight byte lanes.  cmeq writes each dst lane all-ones on
// equality, all-zeros otherwise; the element type is taken from input 1.
instruct vcmeq8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// VectorMaskCmp eq, sixteen byte lanes.
instruct vcmeq16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorMaskCmp eq, four short lanes.
instruct vcmeq4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// VectorMaskCmp eq, eight short lanes.
instruct vcmeq8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorMaskCmp eq, two int lanes.
instruct vcmeq2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// VectorMaskCmp eq, four int lanes.
instruct vcmeq4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorMaskCmp eq, two long lanes.
instruct vcmeq2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorMaskCmp eq, two float lanes (FP compare uses fcmeq).
instruct vcmeq2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// VectorMaskCmp eq, four float lanes.
instruct vcmeq4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// VectorMaskCmp eq, two double lanes.
instruct vcmeq2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::eq &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmeq  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18413 
// VectorMaskCmp ne, eight byte lanes.  NEON has no cmne: emit cmeq then
// invert the mask with notr.
instruct vcmne8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (8B)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

// VectorMaskCmp ne, sixteen byte lanes: cmeq followed by notr.
instruct vcmne16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (16B)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
18447 
// VectorMaskCmp ne, four short lanes: cmeq then invert the mask with notr.
instruct vcmne4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (4S)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Invert with T8B (64-bit), consistent with the other vecD variants
    // (vcmne8B/vcmne2I/vcmne2F); the previous T16B needlessly wrote the
    // unused upper 64 bits of the register.
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}
18464 
// VectorMaskCmp ne, eight short lanes: cmeq then invert the full Q register.
instruct vcmne8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (8S)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
18481 
// VectorMaskCmp ne, two int lanes: cmeq then invert the low 64 bits (T8B).
instruct vcmne2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (2I)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

// VectorMaskCmp ne, four int lanes: cmeq then invert the full Q register.
instruct vcmne4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (4I)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// VectorMaskCmp ne, two long lanes: cmeq then invert the full Q register.
instruct vcmne2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmeq  $dst,$src1,$src2\n\t# vector cmp (2L)"
            "not   $dst,$dst\t" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmeq(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
18532 
18533 instruct vcmne2F(vecD dst, vecD src1, vecD src2)
18534 %{
18535   predicate(n->as_Vector()->length() == 2 &&
18536             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
18537             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
18538   match(Set dst (VectorMaskCmp src1 src2));
18539   format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (2F)"
18540             "not    $dst,$dst\t" %}
18541   ins_cost(INSN_COST);
18542   ins_encode %{
18543     __ fcmeq(as_FloatRegister($dst$$reg), __ T2S,
18544              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
18545     __ notr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
18546   %}
18547   ins_pipe(vdop_fp64);
18548 %}
18549 
18550 instruct vcmne4F(vecX dst, vecX src1, vecX src2)
18551 %{
18552   predicate(n->as_Vector()->length() == 4 &&
18553             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
18554             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
18555   match(Set dst (VectorMaskCmp src1 src2));
18556   format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (4F)"
18557             "not    $dst,$dst\t" %}
18558   ins_cost(INSN_COST);
18559   ins_encode %{
18560     __ fcmeq(as_FloatRegister($dst$$reg), __ T4S,
18561              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
18562     __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
18563   %}
18564   ins_pipe(vdop_fp128);
18565 %}
18566 
18567 instruct vcmne2D(vecX dst, vecX src1, vecX src2)
18568 %{
18569   predicate(n->as_Vector()->length() == 2 &&
18570             n->as_VectorMaskCmp()->get_predicate() == BoolTest::ne &&
18571             n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
18572   match(Set dst (VectorMaskCmp src1 src2));
18573   format %{ "fcmeq  $dst,$src1,$src2\n\t# vector cmp (2D)"
18574             "not    $dst,$dst\t" %}
18575   ins_cost(INSN_COST);
18576   ins_encode %{
18577     __ fcmeq(as_FloatRegister($dst$$reg), __ T2D,
18578              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
18579     __ notr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
18580   %}
18581   ins_pipe(vdop_fp128);
18582 %}
18583 
// Vector signed "less than" compares.  NEON has no register-register CMLT,
// so each pattern emits CMGT (or FCMGT for float/double) with the source
// operands swapped in the encoding: src1 < src2  <=>  src2 > src1.
// True lanes become all-ones, false lanes become zero.

instruct vcmlt8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // Note the operand order: src2 before src1 (lt via swapped gt).
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmlt16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmlt8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmlt4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src2,$src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmlt2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmlt4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmlt2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::lt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src2,$src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18733 
// Vector signed "less than or equal" compares.  NEON has no
// register-register CMLE, so each pattern emits CMGE (or FCMGE for
// float/double) with the source operands swapped in the encoding:
// src1 <= src2  <=>  src2 >= src1.  True lanes become all-ones.

instruct vcmle8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // Note the operand order: src2 before src1 (le via swapped ge).
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmle4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src2,$src1\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmle2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmle4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmle2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::le &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src2,$src1\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
18883 
// Vector signed "greater than" compares: direct CMGT (FCMGT for
// float/double) with operands in source order.  True lanes become
// all-ones, false lanes become zero.

instruct vcmgt8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmgt4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmgt  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmgt(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmgt2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmgt4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmgt2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::gt &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmgt  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmgt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19033 
// Vector signed "greater than or equal" compares: direct CMGE (FCMGE for
// float/double) with operands in source order.  True lanes become
// all-ones, false lanes become zero.

instruct vcmge8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (8B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmge16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (16B)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmge8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (8S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (2I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vcmge4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_INT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (4I)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_LONG);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "cmge  $dst,$src1,$src2\t# vector cmp (2L)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmge(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vcmge2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (2F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vcmge4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (4F)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T4S,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vcmge2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 &&
            n->as_VectorMaskCmp()->get_predicate() == BoolTest::ge &&
            n->in(1)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (VectorMaskCmp src1 src2));
  format %{ "fcmge  $dst,$src1,$src2\t# vector cmp (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fcmge(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
19183 
19184 // --------------------------------- blend (bsl) ----------------------------
19185 
// Vector blend via BSL (bitwise select).  The blend mask arrives in $dst
// (the third input of VectorBlend, matched into the same register), and
// BSL computes dst = (dst & Vn) | (~dst & Vm); with the operand order used
// here (Vn = src2, Vm = src1), set mask bits select src2 and clear bits
// select src1.  Element type is irrelevant — the selection is bitwise, so
// only total length in bytes is tested.

instruct vbsl8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst,$src2,$src1\t# vector (8B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vbsl16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (VectorBlend (Binary src1 src2) dst));
  ins_cost(INSN_COST);
  format %{ "bsl  $dst,$src2,$src1\t# vector (16B)" %}
  ins_encode %{
    __ bsl(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src2$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(vlogical128);
%}
19211 
// VectorLoadMask for byte lanes: convert a vector of boolean bytes into a
// lane mask by negating each byte, mapping 1 -> -1 (all bits set) and
// 0 -> 0.  No widening is needed since source and mask element sizes match.

instruct loadmask8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "neg   $dst,$src\t# load mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct loadmask16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16 && n->bottom_type()->is_vect()->element_basic_type() == T_BYTE);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "neg   $dst,$src\t# load mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}
19233 
// VectorLoadMask for short lanes: zero-extend the boolean bytes to 16-bit
// lanes (UXTL), then negate so 1 -> -1 (all bits set) and 0 -> 0.
// Format strings fixed to match the emitted code: the original printed
// "uxtl $dst,$dst" / "neg $dst,$src", but the encoding is
// uxtl(dst <- src) followed by neg(dst <- dst).

instruct loadmask4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t"
            "neg   $dst,$dst\t# load mask (4B to 4S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct loadmask8S(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 && n->bottom_type()->is_vect()->element_basic_type() == T_SHORT);
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\n\t"
            "neg   $dst,$dst\t# load mask (8B to 8S)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ negr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19259 
// VectorLoadMask for int/float lanes: zero-extend the boolean bytes twice
// (B -> S -> I via UXTL), then negate so 1 -> -1 (all bits set) and 0 -> 0.
// T_FLOAT is accepted because the mask is purely bitwise.  Format strings
// fixed so each instruction ends with "comment + newline" instead of
// "newline + comment", which merged the comment into the next mnemonic in
// debug listings.

instruct loadmask2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\t# 2B to 2S\n\t"
            "uxtl  $dst,$dst\t# 2S to 2I\n\t"
            "neg   $dst,$dst\t# load mask (2B to 2I)" %}
  ins_encode %{
    // NOTE(review): the intermediate uxtl writes a 128-bit arrangement even
    // though the result is a 64-bit vector; pipe class left as vdop128 —
    // confirm this is intentional.
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct loadmask4I(vecX dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
            (n->bottom_type()->is_vect()->element_basic_type() == T_INT ||
             n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT));
  match(Set dst (VectorLoadMask src));
  ins_cost(INSN_COST);
  format %{ "uxtl  $dst,$src\t# 4B to 4S\n\t"
            "uxtl  $dst,$dst\t# 4S to 4I\n\t"
            "neg   $dst,$dst\t# load mask (4B to 4I)" %}
  ins_encode %{
    __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
    __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
    __ negr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19293 
19294 instruct loadmask2L(vecX dst, vecD src) %{
19295   predicate(n->as_Vector()->length() == 2 &&
19296             (n->bottom_type()->is_vect()->element_basic_type() == T_LONG ||
19297              n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE));
19298   match(Set dst (VectorLoadMask src));
19299   ins_cost(INSN_COST);
19300   format %{ "uxtl  $dst,$src\n\t# 2B to 2S"
19301             "uxtl  $dst,$dst\n\t# 2S to 2I"
19302             "uxtl  $dst,$dst\n\t# 2I to 2L"
19303             "neg   $dst,$dst\t# load mask (2B to 2L)" %}
19304   ins_encode %{
19305     __ uxtl(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg), __ T8B);
19306     __ uxtl(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($dst$$reg), __ T4H);
19307     __ uxtl(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg), __ T2S);
19308     __ negr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($dst$$reg));
19309   %}
19310   ins_pipe(vdop128);
19311 %}
19312 
// VectorStoreMask family: convert a 0/-1 lane mask into 0/1 bytes.
// Each rule narrows the lanes down to bytes with xtn as needed, then
// negates (neg -1 == 1, neg 0 == 0).
// NOTE(review): GetInputMaskSize() appears to return the input mask's
// element size in bytes (1/2/4/8) -- confirm against the
// VectorStoreMaskNode declaration.

// 8 byte lanes: already byte-sized, a single negate suffices.
instruct storemask8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 1);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "negr  $dst,$src\t# store mask (8B to 8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop64);
%}

// 16 byte lanes: single negate across a 128-bit register.
instruct storemask16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 1);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "negr  $dst,$src\t# store mask (16B to 16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdop128);
%}

// 4 short lanes: narrow halfwords to bytes, then negate.
instruct storemask4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 2);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t"
            "neg  $dst,$dst\t# store mask (4S to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

// 8 short lanes: narrow halfwords to bytes, then negate.
instruct storemask8S(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 8 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 2);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t"
            "neg  $dst,$dst\t# store mask (8S to 8B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2 int lanes: narrow twice (32 -> 16 -> 8 bits), then negate.
instruct storemask2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 4);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 2I to 2S"
            "xtn  $dst,$dst\n\t# 2S to 2B"
            "neg  $dst,$dst\t# store mask (2I to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop64);
%}

// 4 int lanes: narrow twice (32 -> 16 -> 8 bits), then negate.
instruct storemask4I(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 4);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 4I to 4S"
            "xtn  $dst,$dst\n\t# 4S to 4B"
            "neg  $dst,$dst\t# store mask (4I to 4B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2 long lanes: narrow three times (64 -> 32 -> 16 -> 8 bits), then
// negate.
instruct storemask2L(vecD dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2 &&
            static_cast<const VectorStoreMaskNode*>(n)->GetInputMaskSize() == 8);
  match(Set dst (VectorStoreMask src));
  ins_cost(INSN_COST);
  format %{ "xtn  $dst,$src\n\t# 2L to 2I"
            "xtn  $dst,$dst\n\t# 2I to 2S"
            "xtn  $dst,$dst\n\t# 2S to 2B"
            "neg  $dst,$dst\t# store mask (2L to 2B)" %}
  ins_encode %{
    __ xtn(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg), __ T2D);
    __ xtn(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($dst$$reg), __ T4S);
    __ xtn(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg), __ T8H);
    __ negr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdop128);
%}
19414 
// ------------------------------ Shift ---------------------------------------
// Broadcast a scalar shift count into every byte lane of a 64-bit
// vector; matches both left and right shift-count nodes.
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Broadcast a scalar shift count into every byte lane of a 128-bit
// vector.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
19438 
// Vector left shift of byte lanes by a per-lane register count (sshl
// with a positive count shifts left), 64-bit form.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector left shift of byte lanes by a per-lane register count,
// 128-bit form.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
19465 
19466 // Right shifts with vector shift count on aarch64 SIMD are implemented
19467 // as left shift by negative shift count.
19468 // There are two cases for vector shift count.
19469 //
19470 // Case 1: The vector shift count is from replication.
19471 //        |            |
19472 //    LoadVector  RShiftCntV
19473 //        |       /
19474 //     RShiftVI
19475 // Note: In inner loop, multiple neg instructions are used, which can be
19476 // moved to outer loop and merge into one neg instruction.
19477 //
19478 // Case 2: The vector shift count is from loading.
19479 // This case isn't supported by middle-end now. But it's supported by
19480 // panama/vectorIntrinsics(JEP 338: Vector API).
19481 //        |            |
19482 //    LoadVector  LoadVector
19483 //        |       /
19484 //     RShiftVI
19485 //
19486 
// Arithmetic right shift of byte lanes by register count: AArch64 SIMD
// has no right shift by vector, so negate the count and use sshl
// (left shift by a negative count shifts right). 64-bit form.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Arithmetic right shift of byte lanes by register count, 128-bit form.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift of byte lanes by register count: negated count
// fed to ushl. 64-bit form.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Logical right shift of byte lanes by register count, 128-bit form.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
19556 
// Immediate shifts of byte lanes. The hardware shl/ushr immediates
// only encode counts 0..7, so out-of-range counts (>= 8) are handled
// explicitly: left/logical-right shifts produce zero via eor of the
// source with itself, while arithmetic right shifts clamp to 7 (which
// yields all sign bits, the correct Java semantics).

// Left shift byte lanes by immediate, 64-bit form.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift byte lanes by immediate, 128-bit form.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift byte lanes by immediate, 64-bit form.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: shifting by 7 already fills the lane with the sign bit.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift byte lanes by immediate, 128-bit form.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: shifting by 7 already fills the lane with the sign bit.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift byte lanes by immediate, 64-bit form.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift byte lanes by immediate, 128-bit form.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
19663 
// Register-count shifts of short (16-bit) lanes. Right shifts negate
// the count and use sshl/ushl, as for the byte variants above.

// Left shift short lanes by register count, 64-bit form.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Left shift short lanes by register count, 128-bit form.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift short lanes: negate the count (byte-wise neg
// is fine since the count is replicated), then sshl. 64-bit form.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Arithmetic right shift short lanes by register count, 128-bit form.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift short lanes by register count, 64-bit form.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Logical right shift short lanes by register count, 128-bit form.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
19760 
// Immediate shifts of short lanes: counts >= 16 are zeroed (eor) for
// left/logical-right shifts and clamped to 15 for arithmetic right
// shifts, mirroring the byte variants.

// Left shift short lanes by immediate, 64-bit form.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift short lanes by immediate, 128-bit form.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift short lanes by immediate, 64-bit form.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: shifting by 15 already fills the lane with the sign bit.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift short lanes by immediate, 128-bit form.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp: shifting by 15 already fills the lane with the sign bit.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift short lanes by immediate, 64-bit form.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift short lanes by immediate, 128-bit form.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= lane width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
19867 
// Register-count shifts of int (32-bit) lanes. Right shifts use the
// same negate-then-sshl/ushl pattern as the narrower lane sizes.

// Left shift int lanes by register count, 64-bit form.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Left shift int lanes by register count, 128-bit form.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift int lanes by register count, 64-bit form.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Arithmetic right shift int lanes by register count, 128-bit form.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift int lanes by register count, 64-bit form.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Logical right shift int lanes by register count, 128-bit form.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
19961 
// Immediate shifts of int lanes. Unlike the byte/short variants there
// is no out-of-range handling here.
// NOTE(review): presumably the shift constant is already reduced mod 32
// for 32-bit lanes before reaching these rules -- confirm where the
// masking happens.

// Left shift int lanes by immediate, 64-bit form.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Left shift int lanes by immediate, 128-bit form.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift int lanes by immediate, 64-bit form.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Arithmetic right shift int lanes by immediate, 128-bit form.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift int lanes by immediate, 64-bit form.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Logical right shift int lanes by immediate, 128-bit form.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
20039 
// Register-count shifts of long (64-bit) lanes; 128-bit vectors only.

// Left shift long lanes by register count.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Arithmetic right shift long lanes: negate the count, then sshl.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical right shift long lanes: negate the count, then ushl.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
20086 
// Left shift of two 64-bit long lanes by a compile-time immediate,
// using a single NEON SHL on the 2D arrangement.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
20099 
// Arithmetic (sign-extending) right shift of two 64-bit long lanes by
// a compile-time immediate, using a single NEON SSHR on the 2D
// arrangement.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
20112 
// Logical (zero-filling) right shift of two 64-bit long lanes by a
// compile-time immediate, using a single NEON USHR on the 2D
// arrangement.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
20125 
20126 //----------PEEPHOLE RULES-----------------------------------------------------
20127 // These must follow all instruction definitions as they use the names
20128 // defined in the instructions definitions.
20129 //
20130 // peepmatch ( root_instr_name [preceding_instruction]* );
20131 //
20132 // peepconstraint %{
20133 // (instruction_number.operand_name relational_op instruction_number.operand_name
20134 //  [, ...] );
20135 // // instruction numbers are zero-based using left to right order in peepmatch
20136 //
20137 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
20138 // // provide an instruction_number.operand_name for each operand that appears
20139 // // in the replacement instruction's match rule
20140 //
20141 // ---------VM FLAGS---------------------------------------------------------
20142 //
20143 // All peephole optimizations can be turned off using -XX:-OptoPeephole
20144 //
20145 // Each peephole rule is given an identifying number starting with zero and
20146 // increasing by one in the order seen by the parser.  An individual peephole
20147 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
20148 // on the command-line.
20149 //
20150 // ---------CURRENT LIMITATIONS----------------------------------------------
20151 //
20152 // Only match adjacent instructions in same basic block
20153 // Only equality constraints
20154 // Only constraints between operands, not (0.dest_reg == RAX_enc)
20155 // Only one replacement instruction
20156 //
20157 // ---------EXAMPLE----------------------------------------------------------
20158 //
20159 // // pertinent parts of existing instructions in architecture description
20160 // instruct movI(iRegINoSp dst, iRegI src)
20161 // %{
20162 //   match(Set dst (CopyI src));
20163 // %}
20164 //
20165 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
20166 // %{
20167 //   match(Set dst (AddI dst src));
20168 //   effect(KILL cr);
20169 // %}
20170 //
20171 // // Change (inc mov) to lea
20172 // peephole %{
//   // increment preceded by register-register move
20174 //   peepmatch ( incI_iReg movI );
20175 //   // require that the destination register of the increment
20176 //   // match the destination register of the move
20177 //   peepconstraint ( 0.dst == 1.dst );
20178 //   // construct a replacement instruction that sets
20179 //   // the destination to ( move's source register + one )
20180 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
20181 // %}
20182 //
20183 
20184 // Implementation no longer uses movX instructions since
20185 // machine-independent system no longer uses CopyX nodes.
20186 //
20187 // peephole
20188 // %{
20189 //   peepmatch (incI_iReg movI);
20190 //   peepconstraint (0.dst == 1.dst);
20191 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
20192 // %}
20193 
20194 // peephole
20195 // %{
20196 //   peepmatch (decI_iReg movI);
20197 //   peepconstraint (0.dst == 1.dst);
20198 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
20199 // %}
20200 
20201 // peephole
20202 // %{
20203 //   peepmatch (addI_iReg_imm movI);
20204 //   peepconstraint (0.dst == 1.dst);
20205 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
20206 // %}
20207 
20208 // peephole
20209 // %{
20210 //   peepmatch (incL_iReg movL);
20211 //   peepconstraint (0.dst == 1.dst);
20212 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
20213 // %}
20214 
20215 // peephole
20216 // %{
20217 //   peepmatch (decL_iReg movL);
20218 //   peepconstraint (0.dst == 1.dst);
20219 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
20220 // %}
20221 
20222 // peephole
20223 // %{
20224 //   peepmatch (addL_iReg_imm movL);
20225 //   peepconstraint (0.dst == 1.dst);
20226 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
20227 // %}
20228 
20229 // peephole
20230 // %{
20231 //   peepmatch (addP_iReg_imm movP);
20232 //   peepconstraint (0.dst == 1.dst);
20233 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
20234 // %}
20235 
20236 // // Change load of spilled value to only a spill
20237 // instruct storeI(memory mem, iRegI src)
20238 // %{
20239 //   match(Set mem (StoreI mem src));
20240 // %}
20241 //
20242 // instruct loadI(iRegINoSp dst, memory mem)
20243 // %{
20244 //   match(Set dst (LoadI mem));
20245 // %}
20246 //
20247 
20248 //----------SMARTSPILL RULES---------------------------------------------------
20249 // These must follow all instruction definitions as they use the names
20250 // defined in the instructions definitions.
20251 
20252 // Local Variables:
20253 // mode: c++
20254 // End: