1 //
   2 // Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Integer registers: each 64-bit register is modelled as a lo/hi pair
// (Rn, Rn_H) of 32-bit halves (see the note on 64 bit int registers
// above).  Column order is (register save type, C convention save type,
// ideal register type, encoding, VMReg).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined: they are kept invisible to
// the allocator so they can be used as scratch registers (see note
// above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are callee-saved (SOE) in the C convention but treated as
// volatile (SOC) for Java -- see the note above on why Java code avoids
// callee-save registers.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// system registers -- never allocated for Java use (NS in the first
// column)
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save). Float registers
// v16-v31 are SOC as per the platform spec.
 163 
// Each FP/SIMD register Vn is described as four 32-bit words
// (Vn, Vn_H, Vn_J, Vn_K) -- VMReg slots 0..3 via next(n) -- covering
// its low 128 bits.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the integer registers: listing order sets the
// allocator's selection priority, highest first (see the note above),
// so scratch volatiles come before argument registers, which come
// before the callee-saved and non-allocatable registers.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the FP/SIMD registers, highest priority first:
// the v16-v31 no-save group is preferred, then the argument registers
// v0-v7, then v8-v15 (callee-saved in the platform ABI).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register; R31 (sp) is therefore omitted
// from the list below.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes used to pin a 32-bit value to one specific
// argument register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP, i.e. R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers, with R29 (fp) also
// excluded -- the variant of no_special_reg32 used when the frame
// pointer must stay reserved (see reg_class_dynamic below).
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// As no_special_reg32_no_fp but with R29 (fp) allocatable -- the
// variant used when the frame pointer is free (see reg_class_dynamic
// below).
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers, with R29 (fp) also
// excluded -- the variant of no_special_reg used when the frame pointer
// must stay reserved (see reg_class_dynamic below).
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// As no_special_reg_no_fp but with R29 (fp) allocatable -- the variant
// used when the frame pointer is free (see reg_class_dynamic below).
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit classes: each pins a long/pointer value to one
// specific register (both 32-bit halves).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (every integer register, including
// the non-allocatable heapbase/thread/fp/lr/sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers: as ptr_reg above but
// with heapbase, thread, fp, lr and sp commented out.
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers: one 32-bit word (Vn) per register is
// enough for single-precision values.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: two 32-bit words (Vn, Vn_H) per
// register.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers: the low two 32-bit words
// (Vn, Vn_H) of each FP/SIMD register.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (four 32-bit slots per register: Vn, Vn_H, Vn_J and Vn_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Singleton classes exposing each V register individually so that
// match rules can pin an operand to a specific register.
// NOTE(review): the per-class comments below say `128 bit' but each
// class lists only the Vn/Vn_H slot pair (two 32-bit slots) --
// confirm this matches how the allocator sizes vector masks.
// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
1102 
1103 %}
1104 
1105 //----------DEFINITION BLOCK---------------------------------------------------
1106 // Define name --> value mappings to inform the ADLC of an integer valued name
1107 // Current support includes integer values in the range [0, 0x7FFFFFFF]
1108 // Format:
1109 //        int_def  <name>         ( <int_value>, <expression>);
1110 // Generated Code in ad_<arch>.hpp
1111 //        #define  <name>   (<expression>)
1112 //        // value == <int_value>
1113 // Generated code in ad_<arch>.cpp adlc_verification()
1114 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
1115 //
1116 
1117 // we follow the ppc-aix port in using a simple cost model which ranks
1118 // register operations as cheap, memory ops as more expensive and
1119 // branches as most expensive. the first two have a low as well as a
1120 // normal cost. huge cost appears to be a way of saying don't do
1121 // something
1122 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls rank twice as expensive as a register op;
  // volatile memory references ten times as expensive.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
1130 
1131 
1132 //----------SOURCE BLOCK-------------------------------------------------------
1133 // This is a block of C++ code which provides values, functions, and
1134 // definitions necessary in the rest of the architecture description
1135 
1136 source_hpp %{
1137 
1138 #include "asm/macroAssembler.hpp"
1139 #include "gc/shared/cardTable.hpp"
1140 #include "gc/shared/cardTableBarrierSet.hpp"
1141 #include "gc/shared/collectedHeap.hpp"
1142 #include "opto/addnode.hpp"
1143 
// Helper queried by shared code when sizing call sites; this
// platform emits no call trampoline stubs, so both queries are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1161 
// Emitters and size queries for the exception and deopt handler
// stubs planted at the end of compiled methods.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is just a far branch to the shared stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 insns = 1 adr + 3 for the far branch; confirm
    // against MacroAssembler::far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1178 
 // returns true iff opcode names a CompareAndSwapX-style
 // read-modify-write node; maybe_volatile extends the set to the
 // CompareAndExchange/WeakCompareAndSwap variants
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1197 %}
1198 
1199 source %{
1200 
  // Optimization of volatile gets and puts
1202   // -------------------------------------
1203   //
1204   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1205   // use to implement volatile reads and writes. For a volatile read
1206   // we simply need
1207   //
1208   //   ldar<x>
1209   //
1210   // and for a volatile write we need
1211   //
1212   //   stlr<x>
1213   //
1214   // Alternatively, we can implement them by pairing a normal
1215   // load/store with a memory barrier. For a volatile read we need
1216   //
1217   //   ldr<x>
1218   //   dmb ishld
1219   //
1220   // for a volatile write
1221   //
1222   //   dmb ish
1223   //   str<x>
1224   //   dmb ish
1225   //
1226   // We can also use ldaxr and stlxr to implement compare and swap CAS
1227   // sequences. These are normally translated to an instruction
1228   // sequence like the following
1229   //
1230   //   dmb      ish
1231   // retry:
1232   //   ldxr<x>   rval raddr
1233   //   cmp       rval rold
1234   //   b.ne done
1235   //   stlxr<x>  rval, rnew, rold
1236   //   cbnz      rval retry
1237   // done:
1238   //   cset      r0, eq
1239   //   dmb ishld
1240   //
1241   // Note that the exclusive store is already using an stlxr
1242   // instruction. That is required to ensure visibility to other
1243   // threads of the exclusive write (assuming it succeeds) before that
1244   // of any subsequent writes.
1245   //
1246   // The following instruction sequence is an improvement on the above
1247   //
1248   // retry:
1249   //   ldaxr<x>  rval raddr
1250   //   cmp       rval rold
1251   //   b.ne done
1252   //   stlxr<x>  rval, rnew, rold
1253   //   cbnz      rval retry
1254   // done:
1255   //   cset      r0, eq
1256   //
1257   // We don't need the leading dmb ish since the stlxr guarantees
1258   // visibility of prior writes in the case that the swap is
1259   // successful. Crucially we don't have to worry about the case where
1260   // the swap is not successful since no valid program should be
1261   // relying on visibility of prior changes by the attempting thread
1262   // in the case where the CAS fails.
1263   //
1264   // Similarly, we don't need the trailing dmb ishld if we substitute
1265   // an ldaxr instruction since that will provide all the guarantees we
1266   // require regarding observation of changes made by other threads
1267   // before any change to the CAS address observed by the load.
1268   //
1269   // In order to generate the desired instruction sequence we need to
1270   // be able to identify specific 'signature' ideal graph node
1271   // sequences which i) occur as a translation of a volatile reads or
1272   // writes or CAS operations and ii) do not occur through any other
1273   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1275   // sequences to the desired machine code sequences. Selection of the
1276   // alternative rules can be implemented by predicates which identify
1277   // the relevant node sequences.
1278   //
1279   // The ideal graph generator translates a volatile read to the node
1280   // sequence
1281   //
1282   //   LoadX[mo_acquire]
1283   //   MemBarAcquire
1284   //
1285   // As a special case when using the compressed oops optimization we
1286   // may also see this variant
1287   //
1288   //   LoadN[mo_acquire]
1289   //   DecodeN
1290   //   MemBarAcquire
1291   //
1292   // A volatile write is translated to the node sequence
1293   //
1294   //   MemBarRelease
1295   //   StoreX[mo_release] {CardMark}-optional
1296   //   MemBarVolatile
1297   //
1298   // n.b. the above node patterns are generated with a strict
1299   // 'signature' configuration of input and output dependencies (see
1300   // the predicates below for exact details). The card mark may be as
1301   // simple as a few extra nodes or, in a few GC configurations, may
1302   // include more complex control flow between the leading and
1303   // trailing memory barriers. However, whatever the card mark
1304   // configuration these signatures are unique to translated volatile
1305   // reads/stores -- they will not appear as a result of any other
1306   // bytecode translation or inlining nor as a consequence of
1307   // optimizing transforms.
1308   //
1309   // We also want to catch inlined unsafe volatile gets and puts and
1310   // be able to implement them using either ldar<x>/stlr<x> or some
1311   // combination of ldr<x>/stlr<x> and dmb instructions.
1312   //
1313   // Inlined unsafe volatiles puts manifest as a minor variant of the
1314   // normal volatile put node sequence containing an extra cpuorder
1315   // membar
1316   //
1317   //   MemBarRelease
1318   //   MemBarCPUOrder
1319   //   StoreX[mo_release] {CardMark}-optional
1320   //   MemBarCPUOrder
1321   //   MemBarVolatile
1322   //
1323   // n.b. as an aside, a cpuorder membar is not itself subject to
1324   // matching and translation by adlc rules.  However, the rule
1325   // predicates need to detect its presence in order to correctly
1326   // select the desired adlc rules.
1327   //
1328   // Inlined unsafe volatile gets manifest as a slightly different
1329   // node sequence to a normal volatile get because of the
1330   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1333   // present
1334   //
1335   //   MemBarCPUOrder
1336   //        ||       \\
1337   //   MemBarCPUOrder LoadX[mo_acquire]
1338   //        ||            |
1339   //        ||       {DecodeN} optional
1340   //        ||       /
1341   //     MemBarAcquire
1342   //
1343   // In this case the acquire membar does not directly depend on the
1344   // load. However, we can be sure that the load is generated from an
1345   // inlined unsafe volatile get if we see it dependent on this unique
1346   // sequence of membar nodes. Similarly, given an acquire membar we
1347   // can know that it was added because of an inlined unsafe volatile
1348   // get if it is fed and feeds a cpuorder membar and if its feed
1349   // membar also feeds an acquiring load.
1350   //
1351   // Finally an inlined (Unsafe) CAS operation is translated to the
1352   // following ideal graph
1353   //
1354   //   MemBarRelease
1355   //   MemBarCPUOrder
1356   //   CompareAndSwapX {CardMark}-optional
1357   //   MemBarCPUOrder
1358   //   MemBarAcquire
1359   //
1360   // So, where we can identify these volatile read and write
1361   // signatures we can choose to plant either of the above two code
1362   // sequences. For a volatile read we can simply plant a normal
1363   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1364   // also choose to inhibit translation of the MemBarAcquire and
1365   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1366   //
1367   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1369   // normal str<x> and then a dmb ish for the MemBarVolatile.
1370   // Alternatively, we can inhibit translation of the MemBarRelease
1371   // and MemBarVolatile and instead plant a simple stlr<x>
1372   // instruction.
1373   //
1374   // when we recognise a CAS signature we can choose to plant a dmb
1375   // ish as a translation for the MemBarRelease, the conventional
1376   // macro-instruction sequence for the CompareAndSwap node (which
1377   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1378   // Alternatively, we can elide generation of the dmb instructions
1379   // and plant the alternative CompareAndSwap macro-instruction
1380   // sequence (which uses ldaxr<x>).
1381   //
1382   // Of course, the above only applies when we see these signature
1383   // configurations. We still want to plant dmb instructions in any
1384   // other cases where we may see a MemBarAcquire, MemBarRelease or
1385   // MemBarVolatile. For example, at the end of a constructor which
1386   // writes final/volatile fields we will see a MemBarRelease
1387   // instruction and this needs a 'dmb ish' lest we risk the
1388   // constructed object being visible without making the
1389   // final/volatile field writes visible.
1390   //
1391   // n.b. the translation rules below which rely on detection of the
1392   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1393   // If we see anything other than the signature configurations we
1394   // always just translate the loads and stores to ldr<x> and str<x>
1395   // and translate acquire, release and volatile membars to the
1396   // relevant dmb instructions.
1397   //
1398 
1399   // is_CAS(int opcode, bool maybe_volatile)
1400   //
1401   // return true if opcode is one of the possible CompareAndSwapX
1402   // values otherwise false.
1403 
  // Classify an ideal opcode as a CAS-like read-modify-write.
  // Returns true for the nodes that always get the CAS treatment,
  // maybe_volatile for the CompareAndExchange/WeakCompareAndSwap
  // variants, and false for everything else.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
      // n.b. the GetAndSetX/GetAndAddX atomics are grouped here as
      // well -- they use the same exclusive load/store sequences.
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These are only CAS-like when the caller allows the weaker
      // (possibly-volatile) variants.
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
1444 
1445   // helper to determine the maximum number of Phi nodes we may need to
1446   // traverse when searching from a card mark membar for the merge mem
1447   // feeding a trailing membar or vice versa
1448 
1449 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1450 
// Returns true when the acquire membar will be subsumed by an
// ldar<x>/ldaxr<x> in the matched load or CAS, so no dmb need be
// planted for it.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // trailing membar of a volatile read: the load itself acquires
  if (mb->trailing_load()) {
    return true;
  }

  // trailing membar of a CAS: elide only for opcodes we translate
  // using ldaxr
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1474 
1475 bool needs_acquiring_load(const Node *n)
1476 {
1477   assert(n->is_Load(), "expecting a load");
1478   if (UseBarriersForVolatile) {
1479     // we use a normal load and a dmb
1480     return false;
1481   }
1482 
1483   LoadNode *ld = n->as_Load();
1484 
1485   return ld->is_acquire();
1486 }
1487 
// Returns true when the release membar will be subsumed by a
// stlr<x>/stlxr<x> in the matched store or CAS, so no dmb need be
// planted for it.
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *barrier = n->as_MemBar();
  // only a leading membar of a volatile write/CAS signature can be
  // elided
  if (!barrier->leading()) {
    return false;
  } else {
    Node* trailing = barrier->trailing_membar();
    MemBarNode* trailing_mb = trailing->as_MemBar();
    assert(trailing_mb->trailing(), "Not a trailing membar?");
    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");

    // the trailing membar's precedent edge identifies the store or
    // load-store this pair brackets
    Node* mem = trailing_mb->in(MemBarNode::Precedent);
    if (mem->is_Store()) {
      // volatile write: the stlr<x> releases
      assert(mem->as_Store()->is_release(), "");
      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
      return true;
    } else {
      // CAS: elide only for opcodes we translate using stlxr
      assert(mem->is_LoadStore(), "");
      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
      return is_CAS(mem->Opcode(), true);
    }
  }
  return false; // not reached -- both branches above return
}
1521 
// Returns true when the trailing MemBarVolatile of a volatile write
// can be elided because the matched store is emitted as stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // the membar is redundant only when it is the trailing membar of a
  // releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // cross-check the leading/trailing membar linkage
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1545 
1546 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1547 
1548 bool needs_releasing_store(const Node *n)
1549 {
1550   // assert n->is_Store();
1551   if (UseBarriersForVolatile) {
1552     // we use a normal store and dmb combination
1553     return false;
1554   }
1555 
1556   StoreNode *st = n->as_Store();
1557 
1558   return st->trailing_membar() != NULL;
1559 }
1560 
1561 // predicate controlling translation of CAS
1562 //
1563 // returns true if CAS needs to use an acquiring load otherwise false
1564 
// Returns true when the CAS macro-sequence should use ldaxr<x>
// (acquiring exclusive load) rather than ldxr<x>.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // an unconditional CAS must always carry its trailing membar
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // a CompareAndExchange/weak variant only needs ldaxr when it is
    // part of a volatile signature, i.e. has a trailing membar
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1582 
1583 #define __ _masm.
1584 
// forward declarations for helper functions to convert register
1586 // indices to register objects
1587 
1588 // the ad file has to provide implementations of certain methods
1589 // expected by the generic code
1590 //
1591 // REQUIRED FUNCTIONALITY
1592 
1593 //=============================================================================
1594 
1595 // !!!!! Special hack to get all types of calls to specify the byte offset
1596 //       from the start of the call to the point where the return address
1597 //       will point.
1598 
1599 int MachCallStaticJavaNode::ret_addr_offset()
1600 {
1601   // call should be a simple bl
1602   int off = 4;
1603   return off;
1604 }
1605 
1606 int MachCallDynamicJavaNode::ret_addr_offset()
1607 {
1608   return 16; // movz, movk, movk, bl
1609 }
1610 
// Byte offset from the start of the call sequence to the return
// address; depends on whether the target lives in the code cache.
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blr(rscratch1)
  // find_blob distinguishes a code-cache stub (far_call) from an
  // external runtime entry (the six-instruction sequence above)
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}
1627 
1628 // Indicate if the safepoint node needs the polling page as an input
1629 
1630 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1632 // instruction itself. so we cannot plant a mov of the safepoint poll
1633 // address followed by a load. setting this to true means the mov is
1634 // scheduled as a prior instruction. that's better for scheduling
1635 // anyway.
1636 
bool SafePointNode::needs_polling_address_input()
{
  // see the comment above: the poll address must be an explicit
  // input so the mov of the page address can be scheduled ahead of
  // the load that carries the oop map data
  return true;
}
1641 
1642 //=============================================================================
1643 
#ifndef PRODUCT
// Debug-only pretty printing for the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
1649 
// Emit a brk #0, the AArch64 breakpoint instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
1654 
// Size in bytes; computed generically from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1658 
1659 //=============================================================================
1660 
#ifndef PRODUCT
  // Debug-only pretty printing for nop padding.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count nop instructions for alignment padding.
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // Size in bytes: one fixed-width instruction per requested nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1677 
1678 //=============================================================================
// The constant table base needs no register on this platform.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// No post-allocation expansion of the constant base on AArch64.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

// No code is emitted, so the node occupies zero bytes.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1703 
#ifndef PRODUCT
// Debug-only pretty printing of the prolog; mirrors the code shapes
// produced by MachPrologNode::emit below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames use an immediate sub; larger frames must stage the
  // frame size through rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1725 
// Emit the method prolog: patchable nop, optional class-init
// barrier, stack bang, and frame construction.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // fast path the class-initialization check for static methods of
  // not-yet-initialized classes
  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  // touch the pages below sp so a stack overflow is detected eagerly
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1768 
// Size in bytes; the prolog shape varies, so measure the emitted
// code generically.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1774 
// The prolog contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
1779 
1780 //=============================================================================
1781 
#ifndef PRODUCT
// Debug-only pretty printing of the epilog; mirrors the code shapes
// produced by MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // small frames use an immediate add; larger frames must stage the
  // frame size through rscratch1
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // return-poll of the safepoint polling page
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1807 
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack zone, and poll the safepoint page on return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1823 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the generic pipeline class for scheduling.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4; // presumably one instruction past the poll -- unverified
            // since this is never called (see note above)
}
1845 
1846 //=============================================================================
1847 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register name to its spill-copy register class.
// The OptoReg numbering places integer registers first, then float
// registers, then flags, then stack slots.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)
  int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);

  if (reg < slots_of_int_registers) {
    return rc_int;
  }

  // we have 32 float register * 4 halves
  if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1876 
1877 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1878   Compile* C = ra_->C;
1879 
1880   // Get registers to move.
1881   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1882   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1883   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1884   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1885 
1886   enum RC src_hi_rc = rc_class(src_hi);
1887   enum RC src_lo_rc = rc_class(src_lo);
1888   enum RC dst_hi_rc = rc_class(dst_hi);
1889   enum RC dst_lo_rc = rc_class(dst_lo);
1890 
1891   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1892 
1893   if (src_hi != OptoReg::Bad) {
1894     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1895            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1896            "expected aligned-adjacent pairs");
1897   }
1898 
1899   if (src_lo == dst_lo && src_hi == dst_hi) {
1900     return 0;            // Self copy, no move.
1901   }
1902 
1903   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1904               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1905   int src_offset = ra_->reg2offset(src_lo);
1906   int dst_offset = ra_->reg2offset(dst_lo);
1907 
1908   if (bottom_type()->isa_vect() != NULL) {
1909     uint ireg = ideal_reg();
1910     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1911     if (cbuf) {
1912       MacroAssembler _masm(cbuf);
1913       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1914       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1915         // stack->stack
1916         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1917         if (ireg == Op_VecD) {
1918           __ unspill(rscratch1, true, src_offset);
1919           __ spill(rscratch1, true, dst_offset);
1920         } else {
1921           __ spill_copy128(src_offset, dst_offset);
1922         }
1923       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1924         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1925                ireg == Op_VecD ? __ T8B : __ T16B,
1926                as_FloatRegister(Matcher::_regEncode[src_lo]));
1927       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1928         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1929                        ireg == Op_VecD ? __ D : __ Q,
1930                        ra_->reg2offset(dst_lo));
1931       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1932         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1933                        ireg == Op_VecD ? __ D : __ Q,
1934                        ra_->reg2offset(src_lo));
1935       } else {
1936         ShouldNotReachHere();
1937       }
1938     }
1939   } else if (cbuf) {
1940     MacroAssembler _masm(cbuf);
1941     switch (src_lo_rc) {
1942     case rc_int:
1943       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1944         if (is64) {
1945             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1946                    as_Register(Matcher::_regEncode[src_lo]));
1947         } else {
1948             MacroAssembler _masm(cbuf);
1949             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1950                     as_Register(Matcher::_regEncode[src_lo]));
1951         }
1952       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1953         if (is64) {
1954             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1955                      as_Register(Matcher::_regEncode[src_lo]));
1956         } else {
1957             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1958                      as_Register(Matcher::_regEncode[src_lo]));
1959         }
1960       } else {                    // gpr --> stack spill
1961         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1962         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1963       }
1964       break;
1965     case rc_float:
1966       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1967         if (is64) {
1968             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1969                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1970         } else {
1971             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1972                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1973         }
1974       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1975           if (cbuf) {
1976             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1977                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1978         } else {
1979             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1980                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1981         }
1982       } else {                    // fpr --> stack spill
1983         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1984         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1985                  is64 ? __ D : __ S, dst_offset);
1986       }
1987       break;
1988     case rc_stack:
1989       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1990         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1991       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1992         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1993                    is64 ? __ D : __ S, src_offset);
1994       } else {                    // stack --> stack copy
1995         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1996         __ unspill(rscratch1, is64, src_offset);
1997         __ spill(rscratch1, is64, dst_offset);
1998       }
1999       break;
2000     default:
2001       assert(false, "bad rc_class for spill");
2002       ShouldNotReachHere();
2003     }
2004   }
2005 
2006   if (st) {
2007     st->print("spill ");
2008     if (src_lo_rc == rc_stack) {
2009       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
2010     } else {
2011       st->print("%s -> ", Matcher::regName[src_lo]);
2012     }
2013     if (dst_lo_rc == rc_stack) {
2014       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
2015     } else {
2016       st->print("%s", Matcher::regName[dst_lo]);
2017     }
2018     if (bottom_type()->isa_vect() != NULL) {
2019       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
2020     } else {
2021       st->print("\t# spill size = %d", is64 ? 64:32);
2022     }
2023   }
2024 
2025   return 0;
2026 
2027 }
2028 
2029 #ifndef PRODUCT
2030 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2031   if (!ra_)
2032     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
2033   else
2034     implementation(NULL, ra_, false, st);
2035 }
2036 #endif
2037 
// Emit the spill copy into the code buffer (no format output).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
2041 
// Size in bytes of the emitted spill copy; determined dynamically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2045 
2046 //=============================================================================
2047 
2048 #ifndef PRODUCT
2049 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2050   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2051   int reg = ra_->get_reg_first(this);
2052   st->print("add %s, rsp, #%d]\t# box lock",
2053             Matcher::regName[reg], offset);
2054 }
2055 #endif
2056 
// Compute the address of the lock box: reg = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  // Stack offset of the box and the destination register encoding.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // Larger offsets are not expected; size() below assumes exactly one
    // 4-byte instruction is emitted.
    ShouldNotReachHere();
  }
}
2069 
// Fixed size: emit() above produces a single 4-byte add instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
2074 
2075 //=============================================================================
2076 
2077 #ifndef PRODUCT
// Debug formatting of the unverified entry point: load the receiver's
// klass, compare against the inline-cache klass, branch to the miss stub
// on mismatch.
// NOTE(review): the printed address operands are missing their opening
// '[' — debug-output cosmetic only.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (CompressedKlassPointers::shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
2092 #endif
2093 
// Emit the unverified entry point: inline-cache klass check on the
// receiver (j_rarg0), jumping to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare receiver klass (via j_rarg0) with the expected IC klass.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
2107 
// Size in bytes of the UEP sequence; determined dynamically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
2112 
2113 // REQUIRED EMIT CODE
2114 
2115 //=============================================================================
2116 
// Emit exception handler code.
// Emits a stub that far-jumps to the shared exception blob; returns the
// stub's offset, or 0 if the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2136 
// Emit deopt handler code.
// Emits a stub that records its own address in lr and far-jumps to the
// deopt blob's unpack entry; returns the stub's offset, or 0 on failure.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr := address of the following far_jump, used by the unpack code.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2157 
2158 // REQUIRED MATCHER CODE
2159 
2160 //=============================================================================
2161 
2162 const bool Matcher::match_rule_supported(int opcode) {
2163   if (!has_match_rule(opcode))
2164     return false;
2165 
2166   bool ret_value = true;
2167   switch (opcode) {
2168     case Op_CacheWB:
2169     case Op_CacheWBPreSync:
2170     case Op_CacheWBPostSync:
2171       if (!VM_Version::supports_data_cache_line_flush()) {
2172         ret_value = false;
2173       }
2174       break;
2175   }
2176 
2177   return ret_value; // Per default match rules are supported.
2178 }
2179 
2180 const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
2181 
2182   // TODO
2183   // identify extra cases that we might want to provide match rules for
2184   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2185   bool ret_value = match_rule_supported(opcode);
2186   // Add rules here.
2187 
2188   return ret_value;  // Per default match rules are supported.
2189 }
2190 
// No predicated (masked) vector support on this platform.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2194 
// Register-pressure threshold for float registers: use the default.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2198 
// Not used on aarch64 (x87-style FPU stack concept does not apply).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2204 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // +/-32K presumably matches the most restrictive short-branch form
  // (tbz/tbnz: 14-bit signed word offset) — TODO confirm.

  return (-32768 <= offset && offset < 32768);
}
2214 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2220 
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2225 
2226 // Vector width in bytes.
2227 const int Matcher::vector_width_in_bytes(BasicType bt) {
2228   int size = MIN2(16,(int)MaxVectorSize);
2229   // Minimum 2 values in vector
2230   if (size < 2*type2aelembytes(bt)) size = 0;
2231   // But never < 4
2232   if (size < 4) size = 0;
2233   return size;
2234 }
2235 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2240 const int Matcher::min_vector_size(const BasicType bt) {
2241 //  For the moment limit the vector size to 8 bytes
2242     int size = 8 / type2aelembytes(bt);
2243     if (size < 2) size = 2;
2244     return size;
2245 }
2246 
// Vector ideal reg.
// Map a vector byte length to its ideal register class (64-bit VecD or
// 128-bit VecX); other lengths are invalid.
const uint Matcher::vector_ideal_reg(int len) {
  switch(len) {
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
2256 
// Ideal register class for a vector shift count of the given byte size;
// same mapping as vector_ideal_reg.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  switch(size) {
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}
2265 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2270 
// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return true;
}
2275 
// false => size gets scaled to BytesPerLong, ok.
// (The array-init count operand is in elements, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
2278 
// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
2284 
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2289 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false: the hardware uses only the relevant low bits of the count.
const bool Matcher::need_masked_shift_count = false;

// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands  = false;
2299 
// Unreachable: supports_generic_vector_operands is false on this platform.
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}
2304 
// Unreachable: supports_generic_vector_operands is false on this platform.
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2309 
// Unreachable: supports_generic_vector_operands is false on this platform.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  ShouldNotReachHere();  // generic vector operands not supported
  return false;
}
2314 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only when no shift is needed can the narrow oop itself serve as an
  // address component.
  return CompressedOops::shift() == 0;
}
2328 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2334 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return CompressedOops::base() == NULL;
}
2339 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return CompressedKlassPointers::base() == NULL;
}
2344 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
2357 
// Not expected to be called on aarch64 (the former "No-op on amd64"
// comment was a copy-paste from the x86 port); Unimplemented() guards
// against unexpected use.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2362 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2366 
// Are floats converted to double when stored to stack during
// deoptimization?  No: floats are spilled as-is.
bool Matcher::float_in_double() { return false; }
2370 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2376 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
// The list below covers the integer argument registers r0-r7 and the
// FP/SIMD argument registers v0-v7 (both 64-bit halves of each).
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
2402 
// Any register usable as a Java argument may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2407 
// No special assembly sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2411 
// Register for DIVI projection of divmodI.
// Unused: aarch64 has no combined divmod instruction.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2416 
// Register for MODI projection of divmodI.
// Unused: aarch64 has no combined divmod instruction.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2422 
// Register for DIVL projection of divmodL.
// Unused: aarch64 has no combined divmod instruction.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2428 
// Register for MODL projection of divmodL.
// Unused: aarch64 has no combined divmod instruction.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2434 
// Registers used to preserve SP across a method-handle invoke: the frame
// pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2438 
2439 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2440   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2441     Node* u = addp->fast_out(i);
2442     if (u->is_Mem()) {
2443       int opsize = u->as_Mem()->memory_size();
2444       assert(opsize > 0, "unexpected memory operand size");
2445       if (u->as_Mem()->memory_size() != (1<<shift)) {
2446         return false;
2447       }
2448     }
2449   }
2450   return true;
2451 }
2452 
// The matcher does not require ConvI2L nodes to carry an explicit type.
const bool Matcher::convi2l_type_required = false;
2454 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// Returns true when the AddP's pieces were pushed for cloning into each
// address use; flags the subsumed nodes in address_visited.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL (ConvI2L x) con) — fold shift (and the
  // i2l conversion, when it too has no other uses) into the address.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) — fold the conversion into a
  // sign-extending addressing mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2495 
// No platform-specific AddP reshaping is needed on aarch64.
void Compile::reshape_address(AddPNode* addp) {
}
2498 
2499 
// Emit a volatile access INSN on REG at [BASE].  The volatile access
// forms only accept a bare base register, so index/scale/displacement
// must be absent.  (SCRATCH is currently unused by the expansion.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2508 
2509 
2510 static Address mem2address(int opcode, Register base, int index, int size, int disp)
2511   {
2512     Address::extend scale;
2513 
2514     // Hooboy, this is fugly.  We need a way to communicate to the
2515     // encoder that the index needs to be sign extended, so we have to
2516     // enumerate all the cases.
2517     switch (opcode) {
2518     case INDINDEXSCALEDI2L:
2519     case INDINDEXSCALEDI2LN:
2520     case INDINDEXI2L:
2521     case INDINDEXI2LN:
2522       scale = Address::sxtw(size);
2523       break;
2524     default:
2525       scale = Address::lsl(size);
2526     }
2527 
2528     if (index == -1) {
2529       return Address(base, disp);
2530     } else {
2531       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2532       return Address(base, as_Register(index), scale);
2533     }
2534   }
2535 
2536 
// Member-function-pointer types for the MacroAssembler load/store
// emitters used by the loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2542 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  // Integer-register variant: emit insn on reg at the decoded address.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int size, int disp)
  {
    Address addr = mem2address(opcode, base, index, size, disp);
    (masm.*insn)(reg, addr);
  }
2553 
  // Float-register variant of loadStore.
  // NOTE(review): unlike mem2address, only the SCALED I2L opcodes select
  // sign extension here — presumably the plain INDINDEXI2L forms never
  // reach float accesses; confirm before unifying with mem2address.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2576 
  // Vector-register variant of loadStore; vector accesses never use a
  // sign-extended index, only base+disp or base+index<<size.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                        FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                        int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2588 
2589 %}
2590 
2591 
2592 
2593 //----------ENCODING BLOCK-----------------------------------------------------
2594 // This block specifies the encoding classes used by the compiler to
2595 // output byte streams.  Encoding classes are parameterized macros
2596 // used by Machine Instruction Nodes in order to generate the bit
2597 // encoding of the instruction.  Operands specify their base encoding
2598 // interface with the interface keyword.  There are currently
2599 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2600 // COND_INTER.  REG_INTER causes an operand to generate a function
2601 // which returns its register number when queried.  CONST_INTER causes
2602 // an operand to generate a function which returns the value of the
2603 // constant when queried.  MEMORY_INTER causes an operand to generate
2604 // four functions which return the Base Register, the Index Register,
2605 // the Scale Value, and the Offset Value of the operand when queried.
2606 // COND_INTER causes an operand to generate six functions which return
2607 // the encoding code (ie - encoding bits for the instruction)
2608 // associated with each basic boolean condition for a conditional
2609 // instruction.
2610 //
2611 // Instructions specify two basic values for encoding.  Again, a
2612 // function is available to check if the constant displacement is an
2613 // oop. They use the ins_encode keyword to specify their encoding
2614 // classes (which must be a sequence of enc_class names, and their
2615 // parameters, specified in the encoding block), and they use the
2616 // opcode keyword to specify, in order, their primary, secondary, and
2617 // tertiary opcode.  Only the opcode sections which a particular
2618 // instruction needs for encoding need to be specified.
2619 encode %{
2620   // Build emit functions for each basic byte or larger field in the
2621   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2622   // from C++ code in the enc_class source block.  Emit functions will
2623   // live in the main source block for now.  In future, we can
2624   // generalize this by adding a syntax that specifies the sizes of
2625   // fields in an order, so that the adlc can build the emit functions
2626   // automagically
2627 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    // Emits a trap/abort so unfinished rules fail loudly at run time.
    __ unimplemented("C2 catch all");
  %}
2633 
2634   // BEGIN Non-volatile memory access
2635 
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    // Load byte, sign-extended into a 32-bit register.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2641 
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    // Load byte, sign-extended into a 64-bit register.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2647 
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    // Load byte, zero-extended, into an int register.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2653 
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    // Load byte, zero-extended, into a long register.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2659 
2660   enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
2661     Register dst_reg = as_Register($dst$$reg);
2662     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
2663                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2664   %}
2665 
2666   enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
2667     Register dst_reg = as_Register($dst$$reg);
2668     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
2669                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2670   %}
2671 
2672   enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
2673     Register dst_reg = as_Register($dst$$reg);
2674     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
2675                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2676   %}
2677 
2678   enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
2679     Register dst_reg = as_Register($dst$$reg);
2680     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
2681                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2682   %}
2683 
2684   enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
2685     Register dst_reg = as_Register($dst$$reg);
2686     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
2687                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2688   %}
2689 
2690   enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
2691     Register dst_reg = as_Register($dst$$reg);
2692     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
2693                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2694   %}
2695 
2696   enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
2697     Register dst_reg = as_Register($dst$$reg);
2698     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
2699                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2700   %}
2701 
2702   enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
2703     Register dst_reg = as_Register($dst$$reg);
2704     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
2705                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2706   %}
2707 
2708   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
2709     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2710     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
2711                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2712   %}
2713 
2714   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
2715     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2716     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
2717                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2718   %}
2719 
2720   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
2721     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2722     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
2723        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2724   %}
2725 
2726   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
2727     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2728     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
2729        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2730   %}
2731 
2732   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
2733     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2734     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
2735        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2736   %}
2737 
  // Non-volatile store encodings, mirroring the loads above.  The *0
  // variants take no src operand and store the zero register (zr);
  // strb0_ordered additionally emits a StoreStore barrier before the
  // store.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit store.  If the source is r31_sp the value is first staged
  // through rscratch2 (sp cannot be used directly as a store source
  // here); the assert documents that this only happens when storing
  // into the current thread.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: MacroAssembler::S/D/Q selects the SIMD width, as in
  // the vector loads.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2830 
2831   // END Non-volatile memory access
2832 
2833   // volatile loads and stores
2834 
  // Release stores for byte/half/word, emitted via the MOV_VOLATILE
  // macro (defined earlier in this file) with the given stlr*
  // instruction; rscratch1 is available to the macro as a temp.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
2849 
2850 
  // Acquiring loads, emitted via the MOV_VOLATILE macro with the given
  // ldar* instruction.  There is no signed sub-word ldar form, so the
  // signed variants do an unsigned ldarb/ldarh and then sign-extend
  // explicitly (sxtbw/sxtb/sxthw/sxth).  The __ used after the macro
  // relies on the _masm the macro introduces — see MOV_VOLATILE's
  // definition earlier in this file.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquiring FP loads: load the raw bits into rscratch1 with
  // ldarw/ldar, then move them into the destination FP register with
  // fmovs/fmovd.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2925 
2926   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2927     Register src_reg = as_Register($src$$reg);
2928     // we sometimes get asked to store the stack pointer into the
2929     // current thread -- we cannot do that directly on AArch64
2930     if (src_reg == r31_sp) {
2931         MacroAssembler _masm(&cbuf);
2932       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2933       __ mov(rscratch2, sp);
2934       src_reg = rscratch2;
2935     }
2936     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2937                  rscratch1, stlr);
2938   %}
2939 
2940   enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
2941     {
2942       MacroAssembler _masm(&cbuf);
2943       FloatRegister src_reg = as_FloatRegister($src$$reg);
2944       __ fmovs(rscratch2, src_reg);
2945     }
2946     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2947                  rscratch1, stlrw);
2948   %}
2949 
2950   enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
2951     {
2952       MacroAssembler _masm(&cbuf);
2953       FloatRegister src_reg = as_FloatRegister($src$$reg);
2954       __ fmovd(rscratch2, src_reg);
2955     }
2956     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2957                  rscratch1, stlr);
2958   %}
2959 
2960   // synchronized read/update encodings
2961 
2962   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2963     MacroAssembler _masm(&cbuf);
2964     Register dst_reg = as_Register($dst$$reg);
2965     Register base = as_Register($mem$$base);
2966     int index = $mem$$index;
2967     int scale = $mem$$scale;
2968     int disp = $mem$$disp;
2969     if (index == -1) {
2970        if (disp != 0) {
2971         __ lea(rscratch1, Address(base, disp));
2972         __ ldaxr(dst_reg, rscratch1);
2973       } else {
2974         // TODO
2975         // should we ever get anything other than this case?
2976         __ ldaxr(dst_reg, base);
2977       }
2978     } else {
2979       Register index_reg = as_Register(index);
2980       if (disp == 0) {
2981         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2982         __ ldaxr(dst_reg, rscratch1);
2983       } else {
2984         __ lea(rscratch1, Address(base, disp));
2985         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2986         __ ldaxr(dst_reg, rscratch1);
2987       }
2988     }
2989   %}
2990 
2991   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2992     MacroAssembler _masm(&cbuf);
2993     Register src_reg = as_Register($src$$reg);
2994     Register base = as_Register($mem$$base);
2995     int index = $mem$$index;
2996     int scale = $mem$$scale;
2997     int disp = $mem$$disp;
2998     if (index == -1) {
2999        if (disp != 0) {
3000         __ lea(rscratch2, Address(base, disp));
3001         __ stlxr(rscratch1, src_reg, rscratch2);
3002       } else {
3003         // TODO
3004         // should we ever get anything other than this case?
3005         __ stlxr(rscratch1, src_reg, base);
3006       }
3007     } else {
3008       Register index_reg = as_Register(index);
3009       if (disp == 0) {
3010         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
3011         __ stlxr(rscratch1, src_reg, rscratch2);
3012       } else {
3013         __ lea(rscratch2, Address(base, disp));
3014         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
3015         __ stlxr(rscratch1, src_reg, rscratch2);
3016       }
3017     }
3018     __ cmpw(rscratch1, zr);
3019   %}
3020 
  // Compare-and-exchange encodings: delegate to MacroAssembler::cmpxchg
  // with acquire=false, release=true for the given operand size
  // (xword/word/halfword/byte).  The memory operand must be a bare base
  // register — no index, no displacement — which the guarantee checks.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3052 
3053 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.  The four size variants below mirror the non-acquiring group
  // above, passing acquire=true instead of false.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
3089 
  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if the preceding comparison set EQ, else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3096 
3097   // prefetch encodings
3098 
  // Prefetch-for-store encoding: emits prfm PSTL1KEEP for the given
  // memory operand.  A base+disp+index form needs the displacement
  // folded into rscratch1 first, since prfm's Address form takes
  // either base+disp or base+scaled-index, not both.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3117 
  // mov encodings
3119 
  // Move a 32-bit immediate into a register; zero is emitted as a move
  // from zr, everything else via MacroAssembler::movw.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit analogue of movw_imm.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3141 
  // Move a pointer constant into a register, dispatching on its reloc
  // type: oops via movoop, metadata via mov_metadata, and plain
  // addresses either as a direct mov (constants below the VM page size)
  // or as adrp + add of the page offset.  NULL and the special value 1
  // are handled by dedicated enc_classes (mov_p0 / mov_p1 below), so
  // reaching them here is a bug.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3166 
  // Pointer constant NULL: just move zr.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // The special pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Address of the polling page, materialized with a relocated adrp;
  // the page is assumed to be page-aligned so the adrp offset must be 0.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Card-table byte map base, via the MacroAssembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow (compressed) oop constant; must carry an oop reloc.  A NULL
  // narrow oop is handled by mov_n0 below, so NULL here is a bug.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow oop NULL: move zr.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant; must carry a metadata reloc.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3224 
3225   // arithmetic encodings
3226 
  // Shared add/subtract-immediate encoding (32-bit).  The instruct
  // rule's primary opcode selects the operation: add has primary == 0,
  // subtract has primary == 1, in which case the constant is negated.
  // A negative effective constant is then emitted as subw of its
  // absolute value so the immediate always fits the addsub form.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit analogue of addsubw_imm (constant still arrives as 32-bit).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3254 
3255   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3256     MacroAssembler _masm(&cbuf);
3257    Register dst_reg = as_Register($dst$$reg);
3258    Register src1_reg = as_Register($src1$$reg);
3259    Register src2_reg = as_Register($src2$$reg);
3260     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3261   %}
3262 
3263   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3264     MacroAssembler _masm(&cbuf);
3265    Register dst_reg = as_Register($dst$$reg);
3266    Register src1_reg = as_Register($src1$$reg);
3267    Register src2_reg = as_Register($src2$$reg);
3268     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3269   %}
3270 
3271   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3272     MacroAssembler _masm(&cbuf);
3273    Register dst_reg = as_Register($dst$$reg);
3274    Register src1_reg = as_Register($src1$$reg);
3275    Register src2_reg = as_Register($src2$$reg);
3276     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3277   %}
3278 
3279   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3280     MacroAssembler _masm(&cbuf);
3281    Register dst_reg = as_Register($dst$$reg);
3282    Register src1_reg = as_Register($src1$$reg);
3283    Register src2_reg = as_Register($src2$$reg);
3284     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3285   %}
3286 
3287   // compare instruction encodings
3288 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an addsub-encodable immediate: subs against
  // zr for non-negative values, adds of the negated value otherwise.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against a general immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit addsub immediate.  val == -val only
  // for 0 (caught by the first branch) and Long.MIN_VALUE, whose
  // negation overflows — that one value is materialized via orr instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against a general immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null-test: compare against zr.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null-test: 32-bit compare against zr.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3370 
  // Unconditional branch to the rule's label operand.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp
  // operand's cmpcode encoding.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-compare variant; identical emission, the cmpOpU operand
  // supplies the unsigned condition code.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3388 
  // Slow-path subtype check: delegates to
  // check_klass_subtype_slow_path with set_cond_codes=true.  When the
  // rule's primary opcode is set, result is cleared to zero on the hit
  // path before the miss label is bound.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3406 
  // Static Java call.  Runtime-wrapper targets (no _method) get a plain
  // runtime-call trampoline; real Java targets get an opt_virtual or
  // static call reloc plus a to-interpreter stub.  Either a failed stub
  // emission or a failed trampoline records a CodeCache-full bailout
  // and returns.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3433 
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    // Virtual/interface Java call dispatched through an inline cache.
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      // IC call emission failed for lack of code cache space.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3443 
  enc_class aarch64_enc_call_epilog() %{
    // Code emitted after a call returns.  Stack-depth verification is not
    // implemented on AArch64, so trap if the flag is ever enabled.
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3451 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a (trampolined)
      // direct call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // by pushing the return pc (paired with a zero slot) onto the stack.
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // Pop the breadcrumb again.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3478 
3479   enc_class aarch64_enc_rethrow() %{
3480     MacroAssembler _masm(&cbuf);
3481     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
3482   %}
3483 
  enc_class aarch64_enc_ret() %{
    // Return to the caller through the link register.
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3488 
3489   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3490     MacroAssembler _masm(&cbuf);
3491     Register target_reg = as_Register($jump_target$$reg);
3492     __ br(target_reg);
3493   %}
3494 
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    // Forward a pending exception: jump to the handler whose address is in
    // jump_target, passing the original return address along.
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3504 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor enter.  Attempts (in order): biased locking,
    // stack (thin) locking via CAS on the markWord, recursive stack lock,
    // and finally claiming an inflated monitor's owner field.
    // On exit: flags EQ => locked; NE => caller must take the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markWord from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Check for existing monitor
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Set tmp to be (markWord of object | UNLOCK_VALUE).
    __ orr(tmp, disp_hdr, markWord::unlocked_value);

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markWord with an unlocked value (tmp) and if
    // equal exchange the stack address of our box with object markWord.
    // On failure disp_hdr contains the possibly locked markWord.
    __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, disp_hdr);
    __ br(Assembler::EQ, cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markWord of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ b(cont);

    // Handle existing monitor.
    __ bind(object_has_monitor);

    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
    __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
               /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
    
    // Store a non-null value into the box to avoid looking like a re-entrant
    // lock. The fast-path monitor unlock code checks for
    // markWord::monitor_value so use markWord::unused_mark which has the
    // relevant bit set, and also matches ObjectSynchronizer::enter.
    __ mov(tmp, (address)markWord::unused_mark().value());
    __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3582 
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit: undo biased/recursive/thin locking, or
    // release an inflated monitor when it has no recursions or waiters.
    // On exit: flags EQ => unlocked; NE => caller must take the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
               /*release*/ true, /*weak*/ false, tmp);
    __ b(cont);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    __ bind(object_has_monitor);
    STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
    __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr); // Sets flags for result
    __ br(Assembler::NE, cont);

    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for result
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(zr, tmp); // set unowned
    
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3643 
3644 %}
3645 
3646 //----------FRAME--------------------------------------------------------------
3647 // Definition of frame structure and management information.
3648 //
3649 //  S T A C K   L A Y O U T    Allocators stack-slot number
3650 //                             |   (to get allocators register number
3651 //  G  Owned by    |        |  v    add OptoReg::stack0())
3652 //  r   CALLER     |        |
3653 //  o     |        +--------+      pad to even-align allocators stack-slot
3654 //  w     V        |  pad0  |        numbers; owned by CALLER
3655 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3656 //  h     ^        |   in   |  5
3657 //        |        |  args  |  4   Holes in incoming args owned by SELF
3658 //  |     |        |        |  3
3659 //  |     |        +--------+
3660 //  V     |        | old out|      Empty on Intel, window on Sparc
3661 //        |    old |preserve|      Must be even aligned.
3662 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3663 //        |        |   in   |  3   area for Intel ret address
3664 //     Owned by    |preserve|      Empty on Sparc.
3665 //       SELF      +--------+
3666 //        |        |  pad2  |  2   pad to align old SP
3667 //        |        +--------+  1
3668 //        |        | locks  |  0
3669 //        |        +--------+----> OptoReg::stack0(), even aligned
3670 //        |        |  pad1  | 11   pad to align new SP
3671 //        |        +--------+
3672 //        |        |        | 10
3673 //        |        | spills |  9   spills
3674 //        V        |        |  8   (pad0 slot for callee)
3675 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3676 //        ^        |  out   |  7
3677 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3678 //     Owned by    +--------+
3679 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3680 //        |    new |preserve|      Must be even-aligned.
3681 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3682 //        |        |        |
3683 //
3684 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3685 //         known from SELF's arguments and the Java calling convention.
3686 //         Region 6-7 is determined per call site.
3687 // Note 2: If the calling convention leaves holes in the incoming argument
3688 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3690 //         incoming area, as the Java calling convention is completely under
3691 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3693 //         varargs C calling conventions.
3694 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3695 //         even aligned with pad0 as needed.
3696 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3697 //           (the latter is true on Intel but is it false on AArch64?)
3698 //         region 6-11 is even aligned; it may be padded out more so that
3699 //         the region from SP to FP meets the minimum stack alignment.
3700 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3701 //         alignment.  Region 11, pad1, may be dynamically extended so that
3702 //         SP meets the minimum alignment.
3703 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  // (2 32-bit slots == one 64-bit word: the BasicLock's displaced header)
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 denotes sp in this file's register encodings --
  // compiled frames address locals sp-relative; confirm against the
  // register definition block.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo/hi hold the first and second allocator slots of the return
    // register for each ideal register type; single-slot types use
    // OptoReg::Bad for hi.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3807 
3808 //----------ATTRIBUTES---------------------------------------------------------
3809 //----------Operand Attributes-------------------------------------------------
3810 op_attrib op_cost(1);        // Required cost attribute
3811 
3812 //----------Instruction Attributes---------------------------------------------
3813 ins_attrib ins_cost(INSN_COST); // Required cost attribute
3814 ins_attrib ins_size(32);        // Required size attribute (in bits)
3815 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3816                                 // a non-matching short branch variant
3817                                 // of some long branch?
3818 ins_attrib ins_alignment(4);    // Required alignment attribute (must
3819                                 // be a power of 2) specifies the
3820                                 // alignment that some part of the
3821                                 // instruction (not necessarily the
3822                                 // start) requires.  If > 1, a
3823                                 // compute_padding() function must be
3824                                 // provided for the instruction
3825 
3826 //----------OPERANDS-----------------------------------------------------------
3827 // Operand definitions must precede instruction definitions for correct parsing
3828 // in the ADLC because operands constitute user defined types which are used in
3829 // instruction definitions.
3830 
3831 //----------Simple Operands----------------------------------------------------
3832 
3833 // Integer operands 32 bit
3834 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any 32 bit int <= 4 (note: no lower bound, may be negative)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31 (e.g. an int shift-count mask)
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3907 
// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63 (e.g. a long shift-count mask)
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low half-word mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4007 
// 64 bit constant 255 (low byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low half-word mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (low 32 bit mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order ones (2^k - 1, non-zero),
// with the top two bits clear
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order ones (2^k - 1, non-zero),
// with the top two bits clear
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4061 
4062 // Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- long variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4115 
4116 // Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4 byte access (scale shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8 byte access (scale shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16 byte access (scale shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4 byte access (scale shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8 byte access (scale shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16 byte access (scale shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4196 
4197 // 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4218 
// Integer operands 64 bit
// 64 bit immediate (any long constant)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (byte offset of JavaFrameAnchor::last_Java_pc within JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4305 
4306 // Pointer operands
4307 // Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 -- confirm the distinct
// role of the -2 sentinel against the instructs that use this operand.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4387 
4388 // Float and Double operands
4389 // Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value encodable in an FMOV (scalar, immediate) field
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value encodable in an FMOV (scalar, immediate) field
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4448 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4479 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its sibling operands this one declares no
// op_cost; confirm the ADLC default cost is the intended value here.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
4523 
// Pointer Register Operands
// Pointer Register
// The iRegP_RN variants below pin allocation to one fixed register
// (register classes rN_reg), for calling-convention and stub uses.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4640 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4695 
// Integer 32 bit registers pinned to one fixed register, for
// calling-convention and stub interfaces.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4740 
4741 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4801 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4845 
// Double register operands pinned to a single FP/SIMD register
// (V0 .. V31), used where a fixed register is required, e.g. by
// calling conventions and stub interfaces.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V14()
%{
  constraint(ALLOC_IN_RC(v14_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V15()
%{
  constraint(ALLOC_IN_RC(v15_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V16()
%{
  constraint(ALLOC_IN_RC(v16_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V17()
%{
  constraint(ALLOC_IN_RC(v17_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V18()
%{
  constraint(ALLOC_IN_RC(v18_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V19()
%{
  constraint(ALLOC_IN_RC(v19_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V20()
%{
  constraint(ALLOC_IN_RC(v20_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V21()
%{
  constraint(ALLOC_IN_RC(v21_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V22()
%{
  constraint(ALLOC_IN_RC(v22_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V23()
%{
  constraint(ALLOC_IN_RC(v23_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V24()
%{
  constraint(ALLOC_IN_RC(v24_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V25()
%{
  constraint(ALLOC_IN_RC(v25_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V26()
%{
  constraint(ALLOC_IN_RC(v26_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V27()
%{
  constraint(ALLOC_IN_RC(v27_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V28()
%{
  constraint(ALLOC_IN_RC(v28_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V29()
%{
  constraint(ALLOC_IN_RC(v29_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V30()
%{
  constraint(ALLOC_IN_RC(v30_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V31()
%{
  constraint(ALLOC_IN_RC(v31_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5133 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5173 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5215 
//----------Memory Operands----------------------------------------------------

// In the MEMORY_INTER descriptions below, index(0xffffffff) is the
// ADLC convention for "no index register".

operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base plus 32-bit index sign-extended to 64 bits and scaled;
// the predicate checks the scaled form fits every memory use
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base plus 64-bit index shifted left by scale
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base plus 32-bit index sign-extended to 64 bits, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base plus 64-bit index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base plus immediate int offset
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base plus immediate long offset
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop variants of the addressing modes above. All require
// CompressedOops::shift() == 0 so a DecodeN is a no-op bit pattern.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}



// AArch64 opto stubs need to write to the pc slot in the thread anchor
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5523 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// NOTE(review): the "RSP" comments below are inherited from the x86
// version of this file; base encoding 0x1e denotes the AArch64 SP.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5598 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// (the hex values are the AArch64 condition-code encodings)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5727 
5728 // Special operand allowing long args to int ops to be truncated for free
5729 
// Matches (ConvL2I reg) directly as an operand so that the truncation
// costs nothing: 32-bit instructions read only the low 32 bits anyway.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5740 
// Vector memory operand classes, keyed by access size in bytes
// (4/8/16-byte accesses use correspondingly scaled offset operands).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5744 
5745 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
5747 // instruction definitions by not requiring the AD writer to specify
5748 // separate instructions for every form of operand when the
5749 // instruction accepts multiple operand types with the same basic
5750 // encoding and format. The classic case of this is memory operands.
5751 
5752 // memory is used to define read/write location for load/store
5753 // instruction defs. we can turn a memory op into an Address
5754 
// NOTE(review): the second row appears to list the narrow-oop (N-suffixed)
// addressing forms — verify against the operand definitions earlier in the file.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
5757 
5758 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5759 // operations. it allows the src to be either an iRegI or a (ConvL2I
5760 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5761 // can be elided because the 32-bit instruction will just employ the
5762 // lower 32 bits anyway.
5763 //
5764 // n.b. this does not elide all L2I conversions. if the truncated
5765 // value is consumed by more than one operation then the ConvL2I
5766 // cannot be bundled into the consuming nodes so an l2i gets planted
5767 // (actually a movw $dst $src) and the downstream instructions consume
5768 // the result of the l2i as an iRegI input. That's a shame since the
5769 // movw is actually redundant but its not too costly.
5770 
5771 opclass iRegIorL2I(iRegI, iRegL2I);
5772 
5773 //----------PIPELINE-----------------------------------------------------------
5774 // Rules which define the behavior of the target architectures pipeline.
5775 
5776 // For specific pipelines, eg A53, define the stages of that pipeline
5777 //pipe_desc(ISS, EX1, EX2, WR);
// Map A53-style stage names (issue / execute 1 / execute 2 / writeback)
// onto the generic S0..S5 stages declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5782 
5783 // Integer ALU reg operation
5784 pipeline %{
5785 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5798 
5799 // We don't use an actual pipeline model so don't care about resources
5800 // or description. we do use pipeline classes to introduce fixed
5801 // latencies
5802 
5803 //----------RESOURCES----------------------------------------------------------
5804 // Resources are the functional units available to the machine
5805 
resources( INS0, INS1, INS01 = INS0 | INS1, // issue slots; INS01 = either slot
           ALU0, ALU1, ALU = ALU0 | ALU1,   // integer ALUs
           MAC,                             // multiply/multiply-accumulate unit
           DIV,                             // divide unit
           BRANCH,                          // branch unit
           LDST,                            // load/store unit
           NEON_FP);                        // SIMD / floating-point unit
5813 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
// (the #defines above alias S0..S3 as ISS/EX1/EX2/WR)
pipe_desc(S0, S1, S2, S3, S4, S5);
5819 
5820 //----------PIPELINE CLASSES---------------------------------------------------
5821 // Pipeline Classes describe the stages in which input and output are
5822 // referenced by the hardware pipeline.
5823 
// Two-source ("dyadic") FP op, single precision; result available in S5
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Two-source FP op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// One-source ("unary") FP op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// One-source FP op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> float conversion
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> double conversion
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> int conversion (FP reg in, general reg out)
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> long conversion
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> FP float conversion (general reg in, FP reg out)
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> FP float conversion
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> int conversion
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> long conversion
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> FP double conversion
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> FP double conversion
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision; issues only in slot 0 (INS0)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; issues only in slot 0 (INS0)
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision; reads the flags register
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate materialization, single precision
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate materialization, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6025 
// Vector pipeline classes. Convention throughout this section: 64-bit
// (vecD) forms may issue in either slot (INS01); 128-bit (vecX) forms
// issue only in slot 0 (INS0).

// 64-bit vector multiply
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate: dst is both written (S5) and
// read (S1) because the accumulator is an input
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate (dst read as accumulator input)
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector two-source integer op
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector two-source integer op
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by register (shift amounts held in a vecX)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by register
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate (immediate needs no read stage)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector two-source FP op
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector two-source FP op
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide; issues only in slot 0
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector one-source FP op
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector one-source FP op
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector immediate move
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector immediate move
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector load
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6300 
6301 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
6302 %{
6303   single_instruction;
6304   mem    : ISS(read);
6305   src    : S2(read);
6306   INS01  : ISS;
6307   NEON_FP : S3;
6308 %}
6309 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6407 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6434 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (single register source plus flags)
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6472 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6525 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6551 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-offset addressing)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6585 
//------- Store pipeline operations -----------------------

// Store - zr, mem (store of the zero register; no data operand to read)
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-offset addressing; dst here is the address base)
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6619 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads the flags register)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch (reads a general register, not flags)
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6648 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node (nops declared in attributes above).
define %{
   MachNop = pipe_class_empty;
%}
6712 
6713 %}
6714 //----------INSTRUCTIONS-------------------------------------------------------
6715 //
6716 // match      -- States which machine-independent subtree may be replaced
6717 //               by this instruction.
6718 // ins_cost   -- The estimated cost of this instruction is used by instruction
6719 //               selection to identify a minimum cost tree of machine
6720 //               instructions that matches a tree of machine-independent
6721 //               instructions.
6722 // format     -- A string providing the disassembly for this instruction.
6723 //               The value of an instruction's operand may be inserted
6724 //               by referring to it with a '$' prefix.
// opcode     -- Three instruction opcodes may be provided.  These are referred
//               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6728 //               indicate the type of machine instruction, while secondary
6729 //               and tertiary are often used for prefix options or addressing
6730 //               modes.
6731 // ins_encode -- A list of encode classes with parameters. The encode class
6732 //               name must have been defined in an 'enc_class' specification
6733 //               in the encode section of the architecture description.
6734 
6735 // ============================================================================
6736 // Memory (Load/Store) Instructions
6737 
6738 // Load Instructions
6739 
// Load Byte (8 bit signed)
// ldrsbw sign-extends the byte into a 32-bit register
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  // plain (non-acquiring) loads only; volatile loads match elsewhere
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// ldrsb sign-extends the byte into a 64-bit register, folding the ConvI2L
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
// ldrbw zero-extends the byte
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6795 
// Load Short (16 bit signed)
// ldrshw sign-extends the halfword into a 32-bit register
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
// ldrh zero-extends the halfword
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6851 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// ldrsw sign-extends the word, folding the ConvI2L
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// (ConvI2L load AND 0xFFFFFFFF) == zero-extending word load, so ldrw suffices
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6893 
6894 // Load Long (64 bit signed)
6895 instruct loadL(iRegLNoSp dst, memory mem)
6896 %{
6897   match(Set dst (LoadL mem));
6898   predicate(!needs_acquiring_load(n));
6899 
6900   ins_cost(4 * INSN_COST);
6901   format %{ "ldr  $dst, $mem\t# int" %}
6902 
6903   ins_encode(aarch64_enc_ldr(dst, mem));
6904 
6905   ins_pipe(iload_reg_mem);
6906 %}
6907 
// Load Range (array length; no acquiring-load predicate needed)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
// barrier_data() == 0: only matches loads that need no GC barrier expansion
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6976 
6977 // Load Float
6978 instruct loadF(vRegF dst, memory mem)
6979 %{
6980   match(Set dst (LoadF mem));
6981   predicate(!needs_acquiring_load(n));
6982 
6983   ins_cost(4 * INSN_COST);
6984   format %{ "ldrs  $dst, $mem\t# float" %}
6985 
6986   ins_encode( aarch64_enc_ldrs(dst, mem) );
6987 
6988   ins_pipe(pipe_class_memory);
6989 %}
6990 
6991 // Load Double
6992 instruct loadD(vRegD dst, memory mem)
6993 %{
6994   match(Set dst (LoadD mem));
6995   predicate(!needs_acquiring_load(n));
6996 
6997   ins_cost(4 * INSN_COST);
6998   format %{ "ldrd  $dst, $mem\t# double" %}
6999 
7000   ins_encode( aarch64_enc_ldrd(dst, mem) );
7001 
7002   ins_pipe(pipe_class_memory);
7003 %}
7004 
7005 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Cost reflects that materializing an arbitrary pointer can take
// multiple mov/movk instructions.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7061 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed: the annotation previously said "# NULL ptr" (copy-paste from
  // loadConP0); this rule materializes the pointer constant one.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7075 
// Load Poll Page Constant
// Uses a pc-relative adr to reach the safepoint polling page.

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card-table base)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
7145 
// Load Packed Float Constant
// immFPacked values fit fmov's 8-bit packed immediate encoding, so no
// constant-table load is needed.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load from the compiled method's constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
// immDPacked values fit fmov's packed immediate encoding.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7189 
// Load Double Constant
// General case: load from the compiled method's constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed: the annotation previously said "float=$con" for a double
  // constant (copy-paste from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7206 
// Store Instructions

// Store CMS card-mark Immediate
// The StoreStore barrier is elided here; the ordered variant below is
// used when the barrier is required.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  // Two instructions: dmb ishst then the strb.
  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  // Plain (non-releasing) store only; volatile stores use stlr forms.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7252 
7253 
// Store zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: the format previously printed the misspelled "rscractch2";
  // the encoding (aarch64_enc_strb0) stores the zero register.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7266 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short (stores the zero register)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Integer (stores the zero register)
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7321 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  // Plain (non-releasing) store only; volatile stores use stlr forms.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: the annotation previously said "# int" for a long store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7335 
// Store zero Long (64 bit signed) — stores the zero register.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: the annotation previously said "# int" for a long store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7349 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer (stores the zero register)
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store narrow null: when both compressed-oop and compressed-klass bases
// are NULL, rheapbase holds zero and can be stored directly.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(CompressedOops::base() == NULL &&
            CompressedKlassPointers::base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7468 
//  ---------------- volatile loads and stores ----------------
// These use load-acquire (ldar*) / store-release (stlr*) forms, which
// only support a simple base-register address — hence the `indirect`
// operand instead of `memory`.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7560 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the format previously said "ldarh" (unsigned) but the
  // encoding emits ldarsh (sign-extending), matching the signed LoadS.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7573 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// 32-bit ldarw zero-extends, so the AndL mask is free (cf. loadUI2L).
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7599 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the annotation previously said "# int" for a long load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7612 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // barrier_data()==0 keeps GC-barriered loads away from this rule.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float (acquiring; goes via ldarw plus an fmov in the encoding)
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double (acquiring)
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7665 
// Store Byte (releasing)
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short (releasing)
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer (releasing)

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7705 
// Store Long (64 bit signed, releasing)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: the annotation previously said "# int" for a long store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7718 
// Store Pointer (releasing)
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer (releasing)
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float (releasing)
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double (releasing)
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

//  ---------------- end of volatile loads and stores ----------------
7775 
// Data cache line write-back (used by e.g. persistent-memory flush
// intrinsics); only matched when the CPU supports cache-line flush.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The indirect operand guarantees a plain base register: no index,
    // no displacement.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering fence before a sequence of cache write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Ordering fence after a sequence of cache write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
7816 
7817 // ============================================================================
7818 // BSWAP Instructions
7819 
7820 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7821   match(Set dst (ReverseBytesI src));
7822 
7823   ins_cost(INSN_COST);
7824   format %{ "revw  $dst, $src" %}
7825 
7826   ins_encode %{
7827     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7828   %}
7829 
7830   ins_pipe(ialu_reg);
7831 %}
7832 
7833 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7834   match(Set dst (ReverseBytesL src));
7835 
7836   ins_cost(INSN_COST);
7837   format %{ "rev  $dst, $src" %}
7838 
7839   ins_encode %{
7840     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7841   %}
7842 
7843   ins_pipe(ialu_reg);
7844 %}
7845 
7846 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7847   match(Set dst (ReverseBytesUS src));
7848 
7849   ins_cost(INSN_COST);
7850   format %{ "rev16w  $dst, $src" %}
7851 
7852   ins_encode %{
7853     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7854   %}
7855 
7856   ins_pipe(ialu_reg);
7857 %}
7858 
7859 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7860   match(Set dst (ReverseBytesS src));
7861 
7862   ins_cost(INSN_COST);
7863   format %{ "rev16w  $dst, $src\n\t"
7864             "sbfmw $dst, $dst, #0, #15" %}
7865 
7866   ins_encode %{
7867     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7868     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7869   %}
7870 
7871   ins_pipe(ialu_reg);
7872 %}
7873 
7874 // ============================================================================
7875 // Zero Count Instructions
7876 
7877 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7878   match(Set dst (CountLeadingZerosI src));
7879 
7880   ins_cost(INSN_COST);
7881   format %{ "clzw  $dst, $src" %}
7882   ins_encode %{
7883     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7884   %}
7885 
7886   ins_pipe(ialu_reg);
7887 %}
7888 
7889 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7890   match(Set dst (CountLeadingZerosL src));
7891 
7892   ins_cost(INSN_COST);
7893   format %{ "clz   $dst, $src" %}
7894   ins_encode %{
7895     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7896   %}
7897 
7898   ins_pipe(ialu_reg);
7899 %}
7900 
7901 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7902   match(Set dst (CountTrailingZerosI src));
7903 
7904   ins_cost(INSN_COST * 2);
7905   format %{ "rbitw  $dst, $src\n\t"
7906             "clzw   $dst, $dst" %}
7907   ins_encode %{
7908     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7909     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7910   %}
7911 
7912   ins_pipe(ialu_reg);
7913 %}
7914 
7915 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7916   match(Set dst (CountTrailingZerosL src));
7917 
7918   ins_cost(INSN_COST * 2);
7919   format %{ "rbit   $dst, $src\n\t"
7920             "clz    $dst, $dst" %}
7921   ins_encode %{
7922     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7923     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7924   %}
7925 
7926   ins_pipe(ialu_reg);
7927 %}
7928 
//---------- Population Count Instructions -------------------------------------
//
// All variants route through the SIMD unit: move the value into a vector
// register, cnt counts bits per byte, addv sums the byte counts.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this rewrites $src in place (mov w,w clears the upper
    // 32 bits, leaving the int value unchanged) — confirm no USE_KILL is
    // needed for $src.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCount of an int loaded from memory: ldrs pulls the 32-bit value
// straight into the vector register, skipping the GPR round trip.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCount of a long loaded from memory (cf. popCountI_mem).
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8018 
8019 // ============================================================================
8020 // MemBar Instruction
8021 
8022 instruct load_fence() %{
8023   match(LoadFence);
8024   ins_cost(VOLATILE_REF_COST);
8025 
8026   format %{ "load_fence" %}
8027 
8028   ins_encode %{
8029     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8030   %}
8031   ins_pipe(pipe_serial);
8032 %}
8033 
8034 instruct unnecessary_membar_acquire() %{
8035   predicate(unnecessary_acquire(n));
8036   match(MemBarAcquire);
8037   ins_cost(0);
8038 
8039   format %{ "membar_acquire (elided)" %}
8040 
8041   ins_encode %{
8042     __ block_comment("membar_acquire (elided)");
8043   %}
8044 
8045   ins_pipe(pipe_class_empty);
8046 %}
8047 
8048 instruct membar_acquire() %{
8049   match(MemBarAcquire);
8050   ins_cost(VOLATILE_REF_COST);
8051 
8052   format %{ "membar_acquire\n\t"
8053             "dmb ish" %}
8054 
8055   ins_encode %{
8056     __ block_comment("membar_acquire");
8057     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8058   %}
8059 
8060   ins_pipe(pipe_serial);
8061 %}
8062 
8063 
8064 instruct membar_acquire_lock() %{
8065   match(MemBarAcquireLock);
8066   ins_cost(VOLATILE_REF_COST);
8067 
8068   format %{ "membar_acquire_lock (elided)" %}
8069 
8070   ins_encode %{
8071     __ block_comment("membar_acquire_lock (elided)");
8072   %}
8073 
8074   ins_pipe(pipe_serial);
8075 %}
8076 
8077 instruct store_fence() %{
8078   match(StoreFence);
8079   ins_cost(VOLATILE_REF_COST);
8080 
8081   format %{ "store_fence" %}
8082 
8083   ins_encode %{
8084     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8085   %}
8086   ins_pipe(pipe_serial);
8087 %}
8088 
8089 instruct unnecessary_membar_release() %{
8090   predicate(unnecessary_release(n));
8091   match(MemBarRelease);
8092   ins_cost(0);
8093 
8094   format %{ "membar_release (elided)" %}
8095 
8096   ins_encode %{
8097     __ block_comment("membar_release (elided)");
8098   %}
8099   ins_pipe(pipe_serial);
8100 %}
8101 
8102 instruct membar_release() %{
8103   match(MemBarRelease);
8104   ins_cost(VOLATILE_REF_COST);
8105 
8106   format %{ "membar_release\n\t"
8107             "dmb ish" %}
8108 
8109   ins_encode %{
8110     __ block_comment("membar_release");
8111     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8112   %}
8113   ins_pipe(pipe_serial);
8114 %}
8115 
8116 instruct membar_storestore() %{
8117   match(MemBarStoreStore);
8118   ins_cost(VOLATILE_REF_COST);
8119 
8120   format %{ "MEMBAR-store-store" %}
8121 
8122   ins_encode %{
8123     __ membar(Assembler::StoreStore);
8124   %}
8125   ins_pipe(pipe_serial);
8126 %}
8127 
8128 instruct membar_release_lock() %{
8129   match(MemBarReleaseLock);
8130   ins_cost(VOLATILE_REF_COST);
8131 
8132   format %{ "membar_release_lock (elided)" %}
8133 
8134   ins_encode %{
8135     __ block_comment("membar_release_lock (elided)");
8136   %}
8137 
8138   ins_pipe(pipe_serial);
8139 %}
8140 
8141 instruct unnecessary_membar_volatile() %{
8142   predicate(unnecessary_volatile(n));
8143   match(MemBarVolatile);
8144   ins_cost(0);
8145 
8146   format %{ "membar_volatile (elided)" %}
8147 
8148   ins_encode %{
8149     __ block_comment("membar_volatile (elided)");
8150   %}
8151 
8152   ins_pipe(pipe_serial);
8153 %}
8154 
8155 instruct membar_volatile() %{
8156   match(MemBarVolatile);
8157   ins_cost(VOLATILE_REF_COST*100);
8158 
8159   format %{ "membar_volatile\n\t"
8160              "dmb ish"%}
8161 
8162   ins_encode %{
8163     __ block_comment("membar_volatile");
8164     __ membar(Assembler::StoreLoad);
8165   %}
8166 
8167   ins_pipe(pipe_serial);
8168 %}
8169 
8170 // ============================================================================
8171 // Cast/Convert Instructions
8172 
8173 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8174   match(Set dst (CastX2P src));
8175 
8176   ins_cost(INSN_COST);
8177   format %{ "mov $dst, $src\t# long -> ptr" %}
8178 
8179   ins_encode %{
8180     if ($dst$$reg != $src$$reg) {
8181       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8182     }
8183   %}
8184 
8185   ins_pipe(ialu_reg);
8186 %}
8187 
8188 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8189   match(Set dst (CastP2X src));
8190 
8191   ins_cost(INSN_COST);
8192   format %{ "mov $dst, $src\t# ptr -> long" %}
8193 
8194   ins_encode %{
8195     if ($dst$$reg != $src$$reg) {
8196       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8197     }
8198   %}
8199 
8200   ins_pipe(ialu_reg);
8201 %}
8202 
8203 // Convert oop into int for vectors alignment masking
8204 instruct convP2I(iRegINoSp dst, iRegP src) %{
8205   match(Set dst (ConvL2I (CastP2X src)));
8206 
8207   ins_cost(INSN_COST);
8208   format %{ "movw $dst, $src\t# ptr -> int" %}
8209   ins_encode %{
8210     __ movw($dst$$Register, $src$$Register);
8211   %}
8212 
8213   ins_pipe(ialu_reg);
8214 %}
8215 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
//
// Only valid when the compressed-oop encoding uses no shift, in which
// case the narrow-oop bit pattern equals the low 32 bits of the
// decoded pointer, so a plain 32-bit register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Format fixed: previously read "mov dst, $src", which neither named
  // the emitted instruction (movw) nor referenced the $dst operand.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8231 

// Convert oop pointer into compressed form
// Selected when the oop may be null (ptr() != NotNull); the full
// encode sequence tests for null and clobbers the flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Encode for an oop statically known non-null: the null check is
// skipped.  NOTE(review): cr is declared as an operand but there is no
// KILL effect here -- confirm whether the operand is intentional.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a narrow oop that may be null (excludes NotNull and Constant,
// which the _not_null rule below handles).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode a narrow oop statically known non-null (or constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8286 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (never null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null).  A distinct
// single-register assembler entry point exists for the in-place case,
// so dispatch on whether src and dst got the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8324 
// The cast nodes below exist only to carry type information for the
// optimizer; they operate in place on their operand and emit no code
// (size(0), empty encoding).

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8366 
8367 // ============================================================================
8368 // Atomic operation instructions
8369 //
8370 // Intel and SPARC both implement Ideal Node LoadPLocked and
8371 // Store{PIL}Conditional instructions using a normal load for the
8372 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8373 //
8374 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8375 // pair to lock object allocations from Eden space when not using
8376 // TLABs.
8377 //
8378 // There does not appear to be a Load{IL}Locked Ideal Node and the
8379 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8380 // and to use StoreIConditional only for 32-bit and StoreLConditional
8381 // only for 64-bit.
8382 //
8383 // We implement LoadPLocked and StorePLocked instructions using,
8384 // respectively the AArch64 hw load-exclusive and store-conditional
8385 // instructions. Whereas we must implement each of
8386 // Store{IL}Conditional using a CAS which employs a pair of
8387 // instructions comprising a load-exclusive followed by a
8388 // store-conditional.
8389 
8390 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64
// (load-acquire exclusive: sets the exclusive monitor that the paired
// storePConditional's stlxr checks).

instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8407 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// n.b. oldval is matched but not consumed by the encoding: the stlxr
// relies on the exclusive monitor set by the paired loadPLocked
// (ldaxr) rather than re-comparing against the old value.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8472 
// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate
//
// Each rule emits a cmpxchg sequence followed by cset so that $res
// holds 1 on success and 0 on failure; all of them clobber the flags.

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS: only matched when the node carries no GC barrier data
// (non-zero barrier data is handled by GC-specific rules elsewhere --
// presumably ZGC; confirm against the barrier-set ad files).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8587 
// alternative CompareAndSwapX when we are eliding barriers
//
// Selected by needs_acquiring_load_exclusive(n) (defined earlier in
// this file): the exclusive load itself acquires, so the separate
// barriers are elided and the cost is halved relative to the plain
// rules above, making these win when the predicate holds.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer variant additionally requires zero barrier data, mirroring
// the plain compareAndSwapP rule above.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8703 
8704 
8705 // ---------------------------------------------------------------------
8706 
8707 
8708 // BEGIN This section of the file is automatically generated. Do not edit --------------
8709 
8710 // Sundry CAS operations.  Note that release is always true,
8711 // regardless of the memory ordering of the CAS.  This is because we
8712 // need the volatile case to be sequentially consistent but there is
8713 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8714 // can't check the type of memory ordering here, so we always emit a
8715 // STLXR.
8716 
8717 // This section is generated from aarch64_ad_cas.m4
8718 
8719 
8720 
// CompareAndExchange{B,S,I,L,N,P}: strong CAS returning the value
// found in memory (not a success flag).  release is always true (see
// the comment at the top of this generated section); acquire is false
// here, with *Acq variants below.  Sub-word results are sign-extended
// to match Java byte/short semantics.
// NOTE(review): the format strings below say "weak" but every
// encoding passes /*weak*/ false -- mislabel; any fix belongs in
// aarch64_ad_cas.m4, not in this generated text.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8813 
// Acquiring variants of CompareAndExchange{B,S,I,L,N,P}: selected by
// needs_acquiring_load_exclusive(n); both acquire and release are
// true, and the cost is halved so these win when the predicate holds.
// NOTE(review): the "weak" in the format strings is a mislabel (all
// encodings pass /*weak*/ false); fix in aarch64_ad_cas.m4.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8913 
// WeakCompareAndSwap{B,S,I,L,N,P}: weak CAS (/*weak*/ true -- may
// fail spuriously, so callers retry in a loop) returning a 1/0
// success flag in $res via csetw; the exchanged value itself is
// discarded (result register noreg).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9016 
9017 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9018   predicate(needs_acquiring_load_exclusive(n));
9019   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
9020   ins_cost(VOLATILE_REF_COST);
9021   effect(KILL cr);
9022   format %{
9023     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
9024     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9025   %}
9026   ins_encode %{
9027     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9028                Assembler::byte, /*acquire*/ true, /*release*/ true,
9029                /*weak*/ true, noreg);
9030     __ csetw($res$$Register, Assembler::EQ);
9031   %}
9032   ins_pipe(pipe_slow);
9033 %}
9034 
9035 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9036   predicate(needs_acquiring_load_exclusive(n));
9037   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
9038   ins_cost(VOLATILE_REF_COST);
9039   effect(KILL cr);
9040   format %{
9041     "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
9042     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9043   %}
9044   ins_encode %{
9045     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9046                Assembler::halfword, /*acquire*/ true, /*release*/ true,
9047                /*weak*/ true, noreg);
9048     __ csetw($res$$Register, Assembler::EQ);
9049   %}
9050   ins_pipe(pipe_slow);
9051 %}
9052 
9053 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9054   predicate(needs_acquiring_load_exclusive(n));
9055   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
9056   ins_cost(VOLATILE_REF_COST);
9057   effect(KILL cr);
9058   format %{
9059     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
9060     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9061   %}
9062   ins_encode %{
9063     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9064                Assembler::word, /*acquire*/ true, /*release*/ true,
9065                /*weak*/ true, noreg);
9066     __ csetw($res$$Register, Assembler::EQ);
9067   %}
9068   ins_pipe(pipe_slow);
9069 %}
9070 
9071 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
9072   predicate(needs_acquiring_load_exclusive(n));
9073   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
9074   ins_cost(VOLATILE_REF_COST);
9075   effect(KILL cr);
9076   format %{
9077     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
9078     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9079   %}
9080   ins_encode %{
9081     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9082                Assembler::xword, /*acquire*/ true, /*release*/ true,
9083                /*weak*/ true, noreg);
9084     __ csetw($res$$Register, Assembler::EQ);
9085   %}
9086   ins_pipe(pipe_slow);
9087 %}
9088 
9089 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
9090   predicate(needs_acquiring_load_exclusive(n));
9091   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
9092   ins_cost(VOLATILE_REF_COST);
9093   effect(KILL cr);
9094   format %{
9095     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
9096     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9097   %}
9098   ins_encode %{
9099     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9100                Assembler::word, /*acquire*/ true, /*release*/ true,
9101                /*weak*/ true, noreg);
9102     __ csetw($res$$Register, Assembler::EQ);
9103   %}
9104   ins_pipe(pipe_slow);
9105 %}
9106 
9107 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
9108   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
9109   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
9110   ins_cost(VOLATILE_REF_COST);
9111   effect(KILL cr);
9112   format %{
9113     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
9114     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9115   %}
9116   ins_encode %{
9117     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9118                Assembler::xword, /*acquire*/ true, /*release*/ true,
9119                /*weak*/ true, noreg);
9120     __ csetw($res$$Register, Assembler::EQ);
9121   %}
9122   ins_pipe(pipe_slow);
9123 %}
9124 
9125 // END This section of the file is automatically generated. Do not edit --------------
9126 // ---------------------------------------------------------------------
9127 
// GetAndSet (atomic exchange) rules.  $prev receives the value previously
// held at [$mem]; $newv is stored.  Plain variants use
// atomic_xchg/atomic_xchgw; the *Acq variants are selected when
// needs_acquiring_load_exclusive(n) holds and use the acquiring forms
// atomic_xchgal/atomic_xchgalw at the lower VOLATILE_REF_COST.

// int exchange
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long exchange
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop exchange (32-bit width)
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// ptr exchange; only when GC barrier data is zero
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int exchange, acquiring
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long exchange, acquiring
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop exchange, acquiring
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// ptr exchange, acquiring; barrier_data() must be zero
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9212 
9213 
// GetAndAdd (fetch-and-add) rules.  The matrix of variants covers:
//  - width: long (atomic_add/atomic_addal) vs int (atomic_addw/atomic_addalw)
//  - increment: register vs add/sub-encodable immediate (immLAddSub/immIAddSub)
//  - result: used ($newval) vs discarded (the _no_res forms pass noreg and
//    are predicated on result_not_used(), saving the +1 cost)
//  - ordering: plain vs acquiring (*Acq forms, predicated on
//    needs_acquiring_load_exclusive(n), at the lower VOLATILE_REF_COST)

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring variants follow.

instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9385 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Emitted as: cmp; csetw (dst = NE ? 1 : 0); cnegw (negate dst if LT).
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9408 
// Manifest a CmpL-vs-immediate result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Same cset/cneg sequence as cmpL3_reg_reg, but the compare is done with
// an adds/subs against the constant.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant cannot be encoded as a subs immediate, so flip
    // the sign and use adds instead (immLAddSub guarantees the magnitude
    // is encodable, so -con cannot overflow here).
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9433 
9434 // ============================================================================
9435 // Conditional Move Instructions
9436 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9446 
// Conditional move of int values.  Note the operand order in the encode:
// $src2 is passed to cselw before $src1, matching the printed format
// "cselw $dst, $src2, $src1 $cmp".
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (see the note on cmpOp vs
// cmpOpU preceding this section).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9594 
// Conditional move of long values (64-bit csel); same operand ordering
// and signed/unsigned pairing as the int rules above.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9692 
// Conditional move of pointer values (64-bit csel); mirrors the long rules.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9790 
// Conditional move of compressed (narrow) oops -- 32-bit cselw.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9806 
// Unsigned-compare conditional move of compressed (narrow) oops.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // Fixed: this is the cmpOpU (unsigned) flavour, but the format comment
  // previously said "signed"; all sibling U-variants print "unsigned".
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9822 
9823 // special cases where one arg is zero
9824 
9825 instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
9826   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
9827 
9828   ins_cost(INSN_COST * 2);
9829   format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}
9830 
9831   ins_encode %{
9832     __ cselw(as_Register($dst$$reg),
9833              zr,
9834              as_Register($src$$reg),
9835              (Assembler::Condition)$cmp$$cmpcode);
9836   %}
9837 
9838   ins_pipe(icond_reg);
9839 %}
9840 
9841 instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
9842   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
9843 
9844   ins_cost(INSN_COST * 2);
9845   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}
9846 
9847   ins_encode %{
9848     __ cselw(as_Register($dst$$reg),
9849              zr,
9850              as_Register($src$$reg),
9851              (Assembler::Condition)$cmp$$cmpcode);
9852   %}
9853 
9854   ins_pipe(icond_reg);
9855 %}
9856 
// Compressed-pointer cmove (signed compare) with the "false" input being
// the compressed null constant: zr takes the false-operand slot of cselw.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9872 
// Unsigned-compare variant of the zero-false-input compressed-pointer
// cmove: selects $src when $cmp holds, otherwise zr.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9888 
// Conditional move of a float under a signed comparison: one fcsels picks
// $src2 when $cmp holds, otherwise $src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9906 
// Unsigned-compare variant of the float conditional move (cmpOpU /
// rFlagsRegU); otherwise identical to the signed rule.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9924 
// Conditional move of a double under a signed comparison: one fcseld picks
// $src2 when $cmp holds, otherwise $src1.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed: the format comment said "cmove float" although this rule handles
  // CMoveD (double) via fcseld.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9942 
// Unsigned-compare variant of the double conditional move (cmpOpU /
// rFlagsRegU); emits a single fcseld.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed: the format comment said "cmove float" although this rule handles
  // CMoveD (double) via fcseld.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9960 
9961 // ============================================================================
9962 // Arithmetic Instructions
9963 //
9964 
9965 // Integer Addition
9966 
9967 // TODO
9968 // these currently employ operations which do not set CR and hence are
9969 // not flagged as killing CR but we would like to isolate the cases
9970 // where we want to set flags from those where we don't. need to work
9971 // out how to do that.
9972 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9987 
// 32-bit integer add with an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10001 
// AddI of a narrowed long (ConvL2I) plus immediate; the 32-bit addw form
// reads the low word of the long register, so the narrowing is free.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10015 
10016 // Pointer Addition
// Pointer plus 64-bit register offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10031 
// Pointer plus sign-extended int offset: the ConvI2L is folded into the
// add's sxtw extension, so no separate extend instruction is emitted.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10046 
// Pointer plus (long << scale): folded into a single lea with an
// lsl-scaled index register.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10061 
// Pointer plus (sign-extended int << scale): both the ConvI2L and the
// shift are folded into one lea with an sxtw-scaled index.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10076 
// (long)(int)src << scale collapsed into a single sbfiz (sign-extending
// bitfield insert); the inserted width is capped at 32 bits via MIN.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10091 
10092 // Pointer Immediate Addition
10093 // n.b. this needs to be more expensive than using an indirect memory
10094 // operand
// Pointer plus add/sub-encodable immediate; deliberately costed so that
// folding into an indirect memory operand is preferred (see note above).
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10108 
10109 // Long Addition
// 64-bit add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10125 
// Long Immediate Addition. No constant pool entries are required.
// 64-bit add with an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10140 
10141 // Integer Subtraction
// 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10156 
10157 // Immediate Subtraction
// 32-bit integer subtract of an add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10171 
10172 // Long Subtraction
// 64-bit subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10188 
// Long Immediate Subtraction. No constant pool entries are required.
// 64-bit subtract of an add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: the format string read "sub$dst" (missing separator after the
  // mnemonic), which produced garbled disassembly output.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10203 
10204 // Integer Negation (special case for sub)
10205 
// Integer negate: matches 0 - src and emits a single negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10219 
10220 // Long Negation
10221 
// Long negate: matches 0 - src and emits a single neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10235 
10236 // Integer Multiply
10237 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10252 
// (long)src1 * (long)src2 with 32-bit sources: a single smull produces the
// full 64-bit product, avoiding explicit widening.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10267 
10268 // Long Multiply
10269 
// 64-bit multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10284 
// High 64 bits of the signed 128-bit product src1 * src2 (MulHiL), via
// smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed: removed the stray trailing comma after $src2 in the format
  // string, which rendered as "smulh x0, x1, x2, " in disassembly.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10300 
10301 // Combined Integer Multiply & Add/Sub
10302 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format showed "madd" but the encoding emits the 32-bit maddw.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10318 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format showed "msub" but the encoding emits the 32-bit msubw.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10334 
10335 // Combined Integer Multiply & Neg
10336 
// 32-bit multiply-negate: dst = -(src1 * src2); matches the negation
// applied to either multiplicand.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format showed "mneg" but the encoding emits the 32-bit mnegw.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10352 
10353 // Combined Long Multiply & Add/Sub
10354 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10370 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10386 
10387 // Combined Long Multiply & Neg
10388 
// 64-bit multiply-negate: dst = -(src1 * src2); matches the negation
// applied to either multiplicand.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10404 
10405 // Combine Integer Signed Multiply & Add/Sub/Neg Long
10406 
// dst = src3 + (long)src1 * (long)src2 with 32-bit multiplicands, in a
// single smaddl.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10422 
// dst = src3 - (long)src1 * (long)src2 with 32-bit multiplicands, in a
// single smsubl.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10438 
// dst = -((long)src1 * (long)src2); matches the negation applied to either
// widened multiplicand and emits a single smnegl.
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
  match(Set dst (MulL (ConvI2L src1) (SubL zero (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10454 
10455 // Integer Divide
10456 
// 32-bit signed divide; code is emitted by the shared aarch64_enc_divw
// encoding defined elsewhere in this file.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10466 
// (src >> 31) >>> 31 extracts the int sign bit; a single unsigned shift
// right by 31 computes the same value.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10476 
// src + ((src >> 31) >>> 31): adds 1 when src is negative — the rounding
// adjustment for signed divide by a power of two — folded into one
// shifted-register addw.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10490 
10491 // Long Divide
10492 
// 64-bit signed divide; code is emitted by the shared aarch64_enc_div
// encoding defined elsewhere in this file.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10502 
// (src >> 63) >>> 63 extracts the long sign bit; a single unsigned shift
// right by 63 computes the same value.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10512 
// src + ((src >> 63) >>> 63): adds 1 when src is negative — the rounding
// adjustment for signed divide by a power of two — folded into one
// shifted-register add.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed: format omitted the shifted operand; it now shows the LSR form,
  // matching the 32-bit rule div2Round.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10526 
10527 // Integer Remainder
10528 
// 32-bit remainder via divide + multiply-subtract:
// dst = src1 - (src1 / src2) * src2, emitted by the shared
// aarch64_enc_modw encoding (defined elsewhere in this file).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed: removed the stray '(' after "msubw" that garbled the second
  // line of the disassembly format.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10539 
10540 // Long Remainder
10541 
// 64-bit remainder via divide + multiply-subtract:
// dst = src1 - (src1 / src2) * src2, emitted by the shared
// aarch64_enc_mod encoding (defined elsewhere in this file).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: removed the stray '(' after "msub" and used "\n\t" as the line
  // separator, consistent with the 32-bit rule modI.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10552 
10553 // Integer Shifts
10554 
10555 // Shift Left Register
// 32-bit shift left with a register-supplied count (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10570 
10571 // Shift Left Immediate
// 32-bit shift left by an immediate; the count is masked to five bits
// (& 0x1f) before encoding.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10586 
10587 // Shift Right Logical Register
// 32-bit logical shift right with a register-supplied count (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10602 
10603 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; the count is masked to five
// bits (& 0x1f) before encoding.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10618 
10619 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right with a register-supplied count (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10634 
10635 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; the count is masked to
// five bits (& 0x1f) before encoding.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10650 
10651 // Combined Int Mask and Right Shift (using UBFM)
10652 // TODO
10653 
10654 // Long Shifts
10655 
10656 // Shift Left Register
// 64-bit shift left with a register-supplied count (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10671 
10672 // Shift Left Immediate
// 64-bit shift left by an immediate; the count is masked to six bits
// (& 0x3f) before encoding.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10687 
10688 // Shift Right Logical Register
// 64-bit logical shift right with a register-supplied count (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10703 
10704 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; the count is masked to six
// bits (& 0x3f) before encoding.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10719 
10720 // A special-case pattern for card table stores.
// A special-case pattern for card table stores.
// CastP2X exposes the raw pointer bits so the address can be shifted
// directly; otherwise identical to urShiftL_reg_imm.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10735 
10736 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right with a register-supplied count (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10751 
10752 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; the count is masked to
// six bits (& 0x3f) before encoding.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10767 
10768 // BEGIN This section of the file is automatically generated. Do not edit --------------
10769 
10770 instruct regL_not_reg(iRegLNoSp dst,
10771                          iRegL src1, immL_M1 m1,
10772                          rFlagsReg cr) %{
10773   match(Set dst (XorL src1 m1));
10774   ins_cost(INSN_COST);
10775   format %{ "eon  $dst, $src1, zr" %}
10776 
10777   ins_encode %{
10778     __ eon(as_Register($dst$$reg),
10779               as_Register($src1$$reg),
10780               zr,
10781               Assembler::LSL, 0);
10782   %}
10783 
10784   ins_pipe(ialu_reg);
10785 %}
10786 instruct regI_not_reg(iRegINoSp dst,
10787                          iRegIorL2I src1, immI_M1 m1,
10788                          rFlagsReg cr) %{
10789   match(Set dst (XorI src1 m1));
10790   ins_cost(INSN_COST);
10791   format %{ "eonw  $dst, $src1, zr" %}
10792 
10793   ins_encode %{
10794     __ eonw(as_Register($dst$$reg),
10795               as_Register($src1$$reg),
10796               zr,
10797               Assembler::LSL, 0);
10798   %}
10799 
10800   ins_pipe(ialu_reg);
10801 %}
10802 
10803 instruct AndI_reg_not_reg(iRegINoSp dst,
10804                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10805                          rFlagsReg cr) %{
10806   match(Set dst (AndI src1 (XorI src2 m1)));
10807   ins_cost(INSN_COST);
10808   format %{ "bicw  $dst, $src1, $src2" %}
10809 
10810   ins_encode %{
10811     __ bicw(as_Register($dst$$reg),
10812               as_Register($src1$$reg),
10813               as_Register($src2$$reg),
10814               Assembler::LSL, 0);
10815   %}
10816 
10817   ins_pipe(ialu_reg_reg);
10818 %}
10819 
10820 instruct AndL_reg_not_reg(iRegLNoSp dst,
10821                          iRegL src1, iRegL src2, immL_M1 m1,
10822                          rFlagsReg cr) %{
10823   match(Set dst (AndL src1 (XorL src2 m1)));
10824   ins_cost(INSN_COST);
10825   format %{ "bic  $dst, $src1, $src2" %}
10826 
10827   ins_encode %{
10828     __ bic(as_Register($dst$$reg),
10829               as_Register($src1$$reg),
10830               as_Register($src2$$reg),
10831               Assembler::LSL, 0);
10832   %}
10833 
10834   ins_pipe(ialu_reg_reg);
10835 %}
10836 
// ============================================================================
// Fused "register op bitwise-NOT register" rules.
//
// C2's ideal graph has no dedicated NOT node: ~x is represented as
// (Xor x -1).  These rules recognize Or/Xor against that pattern and emit
// the single AArch64 ORN/EON instruction (shifted-register form with a
// degenerate LSL #0) instead of a separate MVN followed by ORR/EOR.
//
// NOTE(review): the rFlagsReg cr operand is not referenced by the match
// rule, the encoding, or any effect() clause in these rules — it looks
// superfluous; confirm against ADLC operand-binding requirements before
// removing it.

// dst = src1 | ~src2  (32-bit) -> ORNW
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2  (64-bit) -> ORN
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2), matched as -1 ^ (src2 ^ src1)  (32-bit) -> EONW
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2), matched as -1 ^ (src2 ^ src1)  (64-bit) -> EON
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10904 
// ============================================================================
// Fused "AND with NOT of a shifted value" rules: BIC with shifted register.
//
// (XorX (shift src2 src3) -1) is ~(src2 <shift> src3); AND against that
// maps onto a single BIC(W) dst, src1, src2, <shift> #imm.  The constant
// shift amount is masked to the operand width (0x1f for 32-bit, 0x3f for
// 64-bit), matching Java shift semantics.  A cost of 1.9 * INSN_COST keeps
// the fused form just below the two separate instructions it replaces, so
// the matcher prefers it.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 & ~(src2 >>> src3)  (32-bit)
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3)  (64-bit)
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3)  (32-bit, arithmetic shift)
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3)  (64-bit, arithmetic shift)
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3)  (32-bit)
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3)  (64-bit)
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11012 
// ============================================================================
// Fused "XOR with NOT of a shifted value" rules: EON with shifted register.
//
// The ideal shape is (XorX -1 (XorX (shift src2 src3) src1)), i.e.
// ~(src1 ^ (src2 <shift> src3)), which is exactly
// EON(W) dst, src1, src2, <shift> #imm.  Shift amounts are masked to the
// operand width (0x1f / 0x3f) per Java shift semantics; cost
// 1.9 * INSN_COST makes the fused form preferable to two instructions.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = ~(src1 ^ (src2 >>> src3))  (32-bit)
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3))  (64-bit)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))  (32-bit, arithmetic shift)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))  (64-bit, arithmetic shift)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))  (32-bit)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))  (64-bit)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11120 
// ============================================================================
// Fused "OR with NOT of a shifted value" rules: ORN with shifted register.
//
// (XorX (shift src2 src3) -1) is ~(src2 <shift> src3); OR against that
// maps onto a single ORN(W) dst, src1, src2, <shift> #imm.  Shift amounts
// are masked to the operand width (0x1f / 0x3f) per Java shift semantics;
// cost 1.9 * INSN_COST makes the fused form preferable to two instructions.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 | ~(src2 >>> src3)  (32-bit)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3)  (64-bit)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)  (32-bit, arithmetic shift)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)  (64-bit, arithmetic shift)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)  (32-bit)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)  (64-bit)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11228 
// ============================================================================
// Fused "AND with shifted register" rules.
//
// AArch64 logical instructions take an optionally-shifted second source
// operand, so (AndX src1 (shift src2 src3)) with a constant shift folds
// into a single AND(W).  The 64-bit assembler entry point is named andr
// rather than and.  Shift amounts are masked to the operand width
// (0x1f / 0x3f) per Java shift semantics; cost 1.9 * INSN_COST keeps the
// fused form preferable to a separate shift plus AND.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 & (src2 >>> src3)  (32-bit)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3)  (64-bit)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)  (32-bit, arithmetic shift)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)  (64-bit, arithmetic shift)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)  (32-bit)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)  (64-bit)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11342 
// ============================================================================
// Fused "XOR with shifted register" rules: EOR with shifted second operand.
//
// (XorX src1 (shift src2 src3)) with a constant shift folds into a single
// EOR(W) dst, src1, src2, <shift> #imm.  Shift amounts are masked to the
// operand width (0x1f / 0x3f) per Java shift semantics; cost
// 1.9 * INSN_COST keeps the fused form preferable to two instructions.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 ^ (src2 >>> src3)  (32-bit)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3)  (64-bit)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)  (32-bit, arithmetic shift)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)  (64-bit, arithmetic shift)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)  (32-bit)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)  (64-bit)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11456 
// ============================================================================
// Fused "OR with shifted register" rules: ORR with shifted second operand.
//
// (OrX src1 (shift src2 src3)) with a constant shift folds into a single
// ORR(W) dst, src1, src2, <shift> #imm.  Shift amounts are masked to the
// operand width (0x1f / 0x3f) per Java shift semantics; cost
// 1.9 * INSN_COST keeps the fused form preferable to two instructions.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 | (src2 >>> src3)  (32-bit)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3)  (64-bit)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3)  (32-bit, arithmetic shift)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3)  (64-bit, arithmetic shift)
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3)  (32-bit)
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3)  (64-bit)
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11570 
// ============================================================================
// Fused "ADD with shifted register" rules.
//
// AArch64 ADD (shifted register) takes an optionally-shifted second source,
// so (AddX src1 (shift src2 src3)) with a constant shift folds into a
// single ADD(W) dst, src1, src2, <shift> #imm.  Shift amounts are masked
// to the operand width (0x1f / 0x3f) per Java shift semantics; cost
// 1.9 * INSN_COST keeps the fused form preferable to two instructions.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 + (src2 >>> src3)  (32-bit)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3)  (64-bit)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3)  (32-bit, arithmetic shift)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3)  (64-bit, arithmetic shift)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3)  (32-bit)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3)  (64-bit)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11684 
// ============================================================================
// Fused "SUB with shifted register" rules.
//
// AArch64 SUB (shifted register) takes an optionally-shifted second source,
// so (SubX src1 (shift src2 src3)) with a constant shift folds into a
// single SUB(W) dst, src1, src2, <shift> #imm.  Shift amounts are masked
// to the operand width (0x1f / 0x3f) per Java shift semantics; cost
// 1.9 * INSN_COST keeps the fused form preferable to two instructions.
//
// NOTE(review): the rFlagsReg cr operand is unused by match/encode and has
// no effect() clause — appears superfluous; confirm before removing.

// dst = src1 - (src2 >>> src3)  (32-bit)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3)  (64-bit)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)  (32-bit, arithmetic shift)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)  (64-bit, arithmetic shift)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)  (32-bit)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)  (64-bit)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11798 
11799 
11800 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapses into one SBFM: a signed
// bitfield move of the low 64-lshift bits of src, placed and sign-extended
// according to the difference of the two shifts.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // Java shift semantics: only the low 6 bits of a long shift count matter.
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    // SBFM immr/imms: imms (s) selects the top bit of the source field
    // (bits <63-lshift:0> survive the left shift); immr (r) is the rotate
    // that positions the field per the net shift (rshift - lshift), mod 64.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11820 
11821 // Shift Left followed by Shift Right.
11822 // This idiom is used by the compiler for the i2b bytecode etc.
11823 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11824 %{
11825   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11826   ins_cost(INSN_COST * 2);
11827   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11828   ins_encode %{
11829     int lshift = $lshift_count$$constant & 31;
11830     int rshift = $rshift_count$$constant & 31;
11831     int s = 31 - lshift;
11832     int r = (rshift - lshift) & 31;
11833     __ sbfmw(as_Register($dst$$reg),
11834             as_Register($src$$reg),
11835             r, s);
11836   %}
11837 
11838   ins_pipe(ialu_reg_shift);
11839 %}
11840 
11841 // Shift Left followed by Shift Right.
11842 // This idiom is used by the compiler for the i2b bytecode etc.
11843 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11844 %{
11845   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11846   ins_cost(INSN_COST * 2);
11847   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11848   ins_encode %{
11849     int lshift = $lshift_count$$constant & 63;
11850     int rshift = $rshift_count$$constant & 63;
11851     int s = 63 - lshift;
11852     int r = (rshift - lshift) & 63;
11853     __ ubfm(as_Register($dst$$reg),
11854             as_Register($src$$reg),
11855             r, s);
11856   %}
11857 
11858   ins_pipe(ialu_reg_shift);
11859 %}
11860 
11861 // Shift Left followed by Shift Right.
11862 // This idiom is used by the compiler for the i2b bytecode etc.
11863 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11864 %{
11865   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11866   ins_cost(INSN_COST * 2);
11867   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11868   ins_encode %{
11869     int lshift = $lshift_count$$constant & 31;
11870     int rshift = $rshift_count$$constant & 31;
11871     int s = 31 - lshift;
11872     int r = (rshift - lshift) & 31;
11873     __ ubfmw(as_Register($dst$$reg),
11874             as_Register($src$$reg),
11875             r, s);
11876   %}
11877 
11878   ins_pipe(ialu_reg_shift);
11879 %}
// Bitfield extract with shift & mask
//
// (src >>> rshift) & mask, where mask = 2^width - 1 (immI_bitmask /
// immL_bitmask guarantee a contiguous low-bit mask), is a single
// unsigned bit-field extract of <width> bits starting at <rshift>.

instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // mask is 2^width - 1, so width = log2(mask + 1)
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of ubfxwI.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends, so the ConvI2L is folded away.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11936 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift is an unsigned bit-field insert-in-zero of
// width = log2(mask + 1) bits at position lshift.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  // field must fit in 32 bits: width + lshift <= 32
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  // field must fit in 64 bits: width + lshift <= 64
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (the 64-bit ubfiz zero-extends, subsuming the ConvI2L).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11991 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64
// (the predicate checks the counts sum to 0 mod 64) is a single EXTR;
// with src1 == src2 this is a rotate right.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12008 
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 (checked mod 32 by the predicate) is one EXTRW;
// with src1 == src2 this is a 32-bit rotate right.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Fix: print "extrw" — the encoding emits the 32-bit extrw, and all
  // other 32-bit patterns in this file print the w-suffixed mnemonic.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12023 
// Same as extrOrL but matching AddL: for disjoint bit ranges (which the
// shift counts summing to 64 guarantees), ADD and OR are equivalent.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12038 
// 32-bit variant of extrAddL: AddI of disjoint shifted halves (shift
// counts sum to 32) is equivalent to OR and folds to one EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  // Fix: print "extrw" — the encoding emits the 32-bit extrw, and all
  // other 32-bit patterns in this file print the w-suffixed mnemonic.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12053 
12054 
// rol expander
//
// AArch64 has no rotate-left instruction; rol(x, s) == ror(x, -s), so
// negate the shift into rscratch1 and use RORV. This is an expand-only
// helper (no match rule); the rol*_Var_* rules below expand into it.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorv uses it mod 64
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12070 
// rol expander
//
// 32-bit variant of rolL_rReg: negate the shift and use RORVW.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorvw uses it mod 32
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12086 
// Match the Java rotate-left idiom (x << s) | (x >>> (64 - s)).
// Since shift counts are taken mod 64, (64 - s) and (0 - s) are
// equivalent; both forms expand into the rolL_rReg helper above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s) instead of (64 - s).
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom: (x << s) | (x >>> (32 - s)), counts mod 32.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// Same 32-bit idiom written with (0 - s) instead of (32 - s).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12122 
// ror expander
//
// Rotate right maps directly to RORV; no scratch negation needed
// (hence single-INSN cost, vs. 3x for rol). Expand-only helper.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12137 
// ror expander
//
// 32-bit variant of rorL_rReg, using RORVW. Expand-only helper.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12152 
// Match the Java rotate-right idiom (x >>> s) | (x << (64 - s)).
// Mirror images of the rol*_Var_* rules: shift counts are mod 64, so
// (64 - s) and (0 - s) are interchangeable.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with (0 - s) instead of (64 - s).
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom: (x >>> s) | (x << (32 - s)), counts mod 32.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// Same 32-bit idiom written with (0 - s) instead of (32 - s).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12188 
// Add/subtract (extended)

// Add a sign-extended int to a long: dst = src1 + sxtw(src2), using
// ADD (extended register) so the ConvI2L costs nothing.
// Fix: dropped the stray ';' after the closing %} — every other
// instruct in this file ends with a bare %}.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12203 
// Subtract a sign-extended int from a long: dst = src1 - sxtw(src2),
// using SUB (extended register).
// Fix: dropped the stray ';' after the closing %} — every other
// instruct in this file ends with a bare %}.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12216 
12217 
// Add with implicit sign/zero extension: the (x << k) >> k idiom (k
// chosen so the kept field is 8/16/32 bits) is folded into the extend
// operand of ADD (extended register).
// NOTE(review): the int variants emit the 64-bit add; only the low 32
// bits of an int result are significant, so this matches addw —
// presumably intentional, consistent with the AddExtI_*_shift rules.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign-extended byte of src2 ((x << 24) >> 24).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + zero-extended byte of src2 ((x << 24) >>> 24).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long variants: ((x << 48) >> 48) is sxth, etc.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign-extended low word of src2 ((x << 32) >> 32).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sign-extended byte of src2 ((x << 56) >> 56).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + zero-extended byte of src2 ((x << 56) >>> 56).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12308 
12309 
// Add/subtract where the zero-extension is expressed as an AND with
// 0xff / 0xffff / 0xffffffff: fold the mask into the uxtb/uxth/uxtw
// extend operand of ADD/SUB (extended register).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff), 32-bit.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xff), 64-bit.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff), 64-bit.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffffffff), 64-bit.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Subtract counterparts of the rules above.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff), 32-bit.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xff), 64-bit.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff), 64-bit.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffffffff), 64-bit.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12439 
12440 
// Add/subtract (extended register) with an extra left shift: the
// extend-idiom ((x << k) >> k) followed by << lshift2 maps onto the
// "extend + shift" form of ADD/SUB. immIExt bounds lshift2 to the
// range the instruction encoding allows.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxth(src2) << lshift2), 64-bit.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxtw(src2) << lshift2), 64-bit.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract counterparts.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxth(src2) << lshift2), 64-bit.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxtw(src2) << lshift2), 64-bit.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit (addw/subw) variants.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxth(src2) << lshift2), 32-bit.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxtb(src2) << lshift2), 32-bit.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxth(src2) << lshift2), 32-bit.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12570 
12571 
// dst = src1 + (sxtw(src2) << lshift): ConvI2L folded into the
// extend+shift operand of ADD (extended register).
// Fix: dropped the stray ';' after the closing %} — every other
// instruct in this file ends with a bare %}.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12584 
// dst = src1 - (sxtw(src2) << lshift): ConvI2L folded into the
// extend+shift operand of SUB (extended register).
// Fix: dropped the stray ';' after the closing %} — every other
// instruct in this file ends with a bare %}.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12597 
12598 
// Add/subtract with zero-extension expressed as an AND mask, plus a
// left shift: (src2 & mask) << lshift folds into the uxt*+shift
// operand of ADD/SUB (extended register).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((src2 & 0xffff) << lshift), 64-bit.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((src2 & 0xffffffff) << lshift), 64-bit.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract counterparts.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xffff) << lshift), 64-bit.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xffffffff) << lshift), 64-bit.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit variant: dst = src1 + ((src2 & 0xff) << lshift).
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12689 
12690 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12691 %{
12692   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12693   ins_cost(1.9 * INSN_COST);
12694   format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
12695 
12696    ins_encode %{
12697      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12698             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12699    %}
12700   ins_pipe(ialu_reg_reg_shift);
12701 %}
12702 
12703 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12704 %{
12705   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12706   ins_cost(1.9 * INSN_COST);
12707   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
12708 
12709    ins_encode %{
12710      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12711             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12712    %}
12713   ins_pipe(ialu_reg_reg_shift);
12714 %}
12715 
12716 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12717 %{
12718   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12719   ins_cost(1.9 * INSN_COST);
12720   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
12721 
12722    ins_encode %{
12723      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12724             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12725    %}
12726   ins_pipe(ialu_reg_reg_shift);
12727 %}
12728 // END This section of the file is automatically generated. Do not edit --------------
12729 
12730 // ============================================================================
12731 // Floating Point Arithmetic Instructions
12732 
// dst = src1 + src2, single precision (FADDS).
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 + src2, double precision (FADDD).
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 - src2, single precision (FSUBS).
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 - src2, double precision (FSUBD).
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 * src2, single precision (FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 * src2, double precision (FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12822 
// src1 * src2 + src3
// Fused multiply-add; only matched when UseFMA is enabled.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
// Double-precision fused multiply-add; only matched when UseFMA is enabled.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the ideal graph may carry the negation on either factor.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Double-precision counterpart of msubF_reg_reg.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Negation may sit on either factor; the addend is negated in both rules.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Double-precision counterpart of mnaddF_reg_reg.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12928 
// src1 * src2 - src3
// NOTE(review): the 'zero' operand is not referenced by the match rule or the
// encoding — presumably a leftover from a pre-Fma (SubF (MulF ...)) pattern;
// confirm before removing it from the signature.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12945 
// src1 * src2 - src3
// NOTE(review): as in mnsubF_reg_reg, the 'zero' operand is unused by the
// match rule and encoding — confirm before removing.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. the assembler entry point is named fnmsub, not fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12963 
12964 
// Math.max(FF)F
// Intrinsic for java.lang.Math.max(float, float) via FMAXS.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
// Intrinsic for java.lang.Math.min(float, float) via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
// Intrinsic for java.lang.Math.max(double, double) via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
// Intrinsic for java.lang.Math.min(double, double) via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13020 
13021 
// dst = src1 / src2, single precision (FDIVS). High cost reflects the
// multi-cycle divider.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// dst = src1 / src2, double precision (FDIVD).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13051 
// dst = -src, single precision (FNEGS).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: format said "fneg" but the encoding emits fnegs (single-precision
  // form), matching negD_reg_reg's "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13065 
// dst = -src, double precision (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13079 
// dst = |src|, single precision (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// dst = |src|, double precision (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13105 
// dst = sqrt(src), double precision (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_s — a double-precision fsqrtd belongs in the
  // double-precision divide/sqrt pipeline class (cf. divD_reg_reg).
  ins_pipe(fp_div_d);
%}
13118 
// dst = sqrt(src), single precision. Matches the ideal-graph idiom
// (float)Math.sqrt((double)src), which is exact when done as fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_d — a single-precision fsqrts belongs in the
  // single-precision divide/sqrt pipeline class (cf. divF_reg_reg).
  ins_pipe(fp_div_s);
%}
13131 
13132 // ============================================================================
13133 // Logical Instructions
13134 
13135 // Integer Logical Instructions
13136 
13137 // And Instructions
13138 
13139 
// dst = src1 & src2 (32-bit).
// NOTE(review): 'cr' is declared but there is no effect(KILL cr) and andw
// does not set flags — presumably harmless leftover; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13154 
// dst = src1 & src2 (32-bit) with a logical-immediate-encodable constant.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format said "andsw" (the flag-setting ANDS form) but the encoding
  // emits a plain andw, which does not set flags.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13169 
13170 // Or Instructions
13171 
// dst = src1 | src2 (32-bit).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | src2 (32-bit) with a logical-immediate-encodable constant.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2 (32-bit).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ src2 (32-bit) with a logical-immediate-encodable constant.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13233 
13234 // Long Logical Instructions
13235 // TODO
13236 
// dst = src1 & src2 (64-bit).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: format comment said "# int" for a 64-bit (long) operation.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13251 
// dst = src1 & src2 (64-bit) with a logical-immediate-encodable constant.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: format comment said "# int" for a 64-bit (long) operation.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13266 
13267 // Or Instructions
13268 
// dst = src1 | src2 (64-bit).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: format comment said "# int" for a 64-bit (long) operation.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | src2 (64-bit) with a logical-immediate-encodable constant.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: format comment said "# int" for a 64-bit (long) operation.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13298 
13299 // Xor Instructions
13300 
// dst = src1 ^ src2 (64-bit).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed: format comment said "# int" for a 64-bit (long) operation.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ src2 (64-bit) with a logical-immediate-encodable constant.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: format comment said "# int" for a 64-bit (long) operation.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13330 
// Sign-extend int to long: sxtw is sbfm with lsb 0, width 32.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: (long)src & 0xFFFFFFFF done as ubfm.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13356 
// Truncate long to int: a 32-bit register move discards the upper bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// int -> boolean (0/1): dst = (src != 0). Clobbers the condition flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean (0/1): dst = (src != null). Clobbers the condition flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13405 
// double -> float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, round toward zero (FCVTZS, 32-bit destination).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, round toward zero (FCVTZS, 64-bit destination).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float signed conversion (SCVTF from a 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float signed conversion.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, round toward zero (FCVTZS, 32-bit destination).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, round toward zero (FCVTZS, 64-bit destination).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double signed conversion (SCVTF from a 32-bit source).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double signed conversion.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13535 
13536 // stack <-> reg and reg <-> reg shuffles with no conversion
13537 
// Reinterpret a float stack slot as an int register (raw bit load).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret an int stack slot as a float register (raw bit load).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a double stack slot as a long register (raw bit load).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a long stack slot as a double register (raw bit load).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13609 
// Spill a float register's raw bits to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill an int register's raw bits to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13645 
// Spill a double register's raw bits to a long stack slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: operand order in the format was "$dst, $src"; the encoding stores
  // $src into the stack slot $dst, like the sibling MoveF2I_reg_stack /
  // MoveI2F_reg_stack / MoveL2D_reg_stack formats.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13663 
// Spill a long register's raw bits to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13681 
// Bit-for-bit move of a float register into an int register (FMOV).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-for-bit move of an int register into a float register (FMOV).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bit-for-bit move of a double register into a long register (FMOV).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-for-bit move of a long register into a double register (FMOV).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13753 
13754 // ============================================================================
13755 // clearing of an array
13756 
13757 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13758 %{
13759   match(Set dummy (ClearArray cnt base));
13760   effect(USE_KILL cnt, USE_KILL base);
13761 
13762   ins_cost(4 * INSN_COST);
13763   format %{ "ClearArray $cnt, $base" %}
13764 
13765   ins_encode %{
13766     __ zero_words($base$$Register, $cnt$$Register);
13767   %}
13768 
13769   ins_pipe(pipe_class_memory);
13770 %}
13771 
13772 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13773 %{
13774   predicate((u_int64_t)n->in(2)->get_long()
13775             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13776   match(Set dummy (ClearArray cnt base));
13777   effect(USE_KILL base);
13778 
13779   ins_cost(4 * INSN_COST);
13780   format %{ "ClearArray $cnt, $base" %}
13781 
13782   ins_encode %{
13783     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13784   %}
13785 
13786   ins_pipe(pipe_class_memory);
13787 %}
13788 
13789 // ============================================================================
13790 // Overflow Math Instructions
13791 
13792 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13793 %{
13794   match(Set cr (OverflowAddI op1 op2));
13795 
13796   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13797   ins_cost(INSN_COST);
13798   ins_encode %{
13799     __ cmnw($op1$$Register, $op2$$Register);
13800   %}
13801 
13802   ins_pipe(icmp_reg_reg);
13803 %}
13804 
13805 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13806 %{
13807   match(Set cr (OverflowAddI op1 op2));
13808 
13809   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13810   ins_cost(INSN_COST);
13811   ins_encode %{
13812     __ cmnw($op1$$Register, $op2$$constant);
13813   %}
13814 
13815   ins_pipe(icmp_reg_imm);
13816 %}
13817 
13818 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13819 %{
13820   match(Set cr (OverflowAddL op1 op2));
13821 
13822   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13823   ins_cost(INSN_COST);
13824   ins_encode %{
13825     __ cmn($op1$$Register, $op2$$Register);
13826   %}
13827 
13828   ins_pipe(icmp_reg_reg);
13829 %}
13830 
13831 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13832 %{
13833   match(Set cr (OverflowAddL op1 op2));
13834 
13835   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13836   ins_cost(INSN_COST);
13837   ins_encode %{
13838     __ cmn($op1$$Register, $op2$$constant);
13839   %}
13840 
13841   ins_pipe(icmp_reg_imm);
13842 %}
13843 
13844 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13845 %{
13846   match(Set cr (OverflowSubI op1 op2));
13847 
13848   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13849   ins_cost(INSN_COST);
13850   ins_encode %{
13851     __ cmpw($op1$$Register, $op2$$Register);
13852   %}
13853 
13854   ins_pipe(icmp_reg_reg);
13855 %}
13856 
13857 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13858 %{
13859   match(Set cr (OverflowSubI op1 op2));
13860 
13861   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13862   ins_cost(INSN_COST);
13863   ins_encode %{
13864     __ cmpw($op1$$Register, $op2$$constant);
13865   %}
13866 
13867   ins_pipe(icmp_reg_imm);
13868 %}
13869 
13870 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13871 %{
13872   match(Set cr (OverflowSubL op1 op2));
13873 
13874   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13875   ins_cost(INSN_COST);
13876   ins_encode %{
13877     __ cmp($op1$$Register, $op2$$Register);
13878   %}
13879 
13880   ins_pipe(icmp_reg_reg);
13881 %}
13882 
13883 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13884 %{
13885   match(Set cr (OverflowSubL op1 op2));
13886 
13887   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13888   ins_cost(INSN_COST);
13889   ins_encode %{
13890     __ subs(zr, $op1$$Register, $op2$$constant);
13891   %}
13892 
13893   ins_pipe(icmp_reg_imm);
13894 %}
13895 
13896 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13897 %{
13898   match(Set cr (OverflowSubI zero op1));
13899 
13900   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13901   ins_cost(INSN_COST);
13902   ins_encode %{
13903     __ cmpw(zr, $op1$$Register);
13904   %}
13905 
13906   ins_pipe(icmp_reg_imm);
13907 %}
13908 
13909 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13910 %{
13911   match(Set cr (OverflowSubL zero op1));
13912 
13913   format %{ "cmp   zr, $op1\t# overflow check long" %}
13914   ins_cost(INSN_COST);
13915   ins_encode %{
13916     __ cmp(zr, $op1$$Register);
13917   %}
13918 
13919   ins_pipe(icmp_reg_imm);
13920 %}
13921 
13922 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13923 %{
13924   match(Set cr (OverflowMulI op1 op2));
13925 
13926   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13927             "cmp   rscratch1, rscratch1, sxtw\n\t"
13928             "movw  rscratch1, #0x80000000\n\t"
13929             "cselw rscratch1, rscratch1, zr, NE\n\t"
13930             "cmpw  rscratch1, #1" %}
13931   ins_cost(5 * INSN_COST);
13932   ins_encode %{
13933     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13934     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13935     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13936     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13937     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13938   %}
13939 
13940   ins_pipe(pipe_slow);
13941 %}
13942 
13943 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13944 %{
13945   match(If cmp (OverflowMulI op1 op2));
13946   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13947             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13948   effect(USE labl, KILL cr);
13949 
13950   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13951             "cmp   rscratch1, rscratch1, sxtw\n\t"
13952             "b$cmp   $labl" %}
13953   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13954   ins_encode %{
13955     Label* L = $labl$$label;
13956     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13957     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13958     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13959     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13960   %}
13961 
13962   ins_pipe(pipe_serial);
13963 %}
13964 
13965 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13966 %{
13967   match(Set cr (OverflowMulL op1 op2));
13968 
13969   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13970             "smulh rscratch2, $op1, $op2\n\t"
13971             "cmp   rscratch2, rscratch1, ASR #63\n\t"
13972             "movw  rscratch1, #0x80000000\n\t"
13973             "cselw rscratch1, rscratch1, zr, NE\n\t"
13974             "cmpw  rscratch1, #1" %}
13975   ins_cost(6 * INSN_COST);
13976   ins_encode %{
13977     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13978     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13979     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
13980     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13981     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13982     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13983   %}
13984 
13985   ins_pipe(pipe_slow);
13986 %}
13987 
13988 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13989 %{
13990   match(If cmp (OverflowMulL op1 op2));
13991   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13992             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13993   effect(USE labl, KILL cr);
13994 
13995   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13996             "smulh rscratch2, $op1, $op2\n\t"
13997             "cmp   rscratch2, rscratch1, ASR #63\n\t"
13998             "b$cmp $labl" %}
13999   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
14000   ins_encode %{
14001     Label* L = $labl$$label;
14002     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14003     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14004     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14005     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14006     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14007   %}
14008 
14009   ins_pipe(pipe_serial);
14010 %}
14011 
14012 // ============================================================================
14013 // Compare Instructions
14014 
14015 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14016 %{
14017   match(Set cr (CmpI op1 op2));
14018 
14019   effect(DEF cr, USE op1, USE op2);
14020 
14021   ins_cost(INSN_COST);
14022   format %{ "cmpw  $op1, $op2" %}
14023 
14024   ins_encode(aarch64_enc_cmpw(op1, op2));
14025 
14026   ins_pipe(icmp_reg_reg);
14027 %}
14028 
14029 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14030 %{
14031   match(Set cr (CmpI op1 zero));
14032 
14033   effect(DEF cr, USE op1);
14034 
14035   ins_cost(INSN_COST);
14036   format %{ "cmpw $op1, 0" %}
14037 
14038   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14039 
14040   ins_pipe(icmp_reg_imm);
14041 %}
14042 
14043 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14044 %{
14045   match(Set cr (CmpI op1 op2));
14046 
14047   effect(DEF cr, USE op1);
14048 
14049   ins_cost(INSN_COST);
14050   format %{ "cmpw  $op1, $op2" %}
14051 
14052   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14053 
14054   ins_pipe(icmp_reg_imm);
14055 %}
14056 
14057 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14058 %{
14059   match(Set cr (CmpI op1 op2));
14060 
14061   effect(DEF cr, USE op1);
14062 
14063   ins_cost(INSN_COST * 2);
14064   format %{ "cmpw  $op1, $op2" %}
14065 
14066   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14067 
14068   ins_pipe(icmp_reg_imm);
14069 %}
14070 
14071 // Unsigned compare Instructions; really, same as signed compare
14072 // except it should only be used to feed an If or a CMovI which takes a
14073 // cmpOpU.
14074 
14075 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
14076 %{
14077   match(Set cr (CmpU op1 op2));
14078 
14079   effect(DEF cr, USE op1, USE op2);
14080 
14081   ins_cost(INSN_COST);
14082   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14083 
14084   ins_encode(aarch64_enc_cmpw(op1, op2));
14085 
14086   ins_pipe(icmp_reg_reg);
14087 %}
14088 
14089 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
14090 %{
14091   match(Set cr (CmpU op1 zero));
14092 
14093   effect(DEF cr, USE op1);
14094 
14095   ins_cost(INSN_COST);
14096   format %{ "cmpw $op1, #0\t# unsigned" %}
14097 
14098   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14099 
14100   ins_pipe(icmp_reg_imm);
14101 %}
14102 
14103 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
14104 %{
14105   match(Set cr (CmpU op1 op2));
14106 
14107   effect(DEF cr, USE op1);
14108 
14109   ins_cost(INSN_COST);
14110   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14111 
14112   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14113 
14114   ins_pipe(icmp_reg_imm);
14115 %}
14116 
14117 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
14118 %{
14119   match(Set cr (CmpU op1 op2));
14120 
14121   effect(DEF cr, USE op1);
14122 
14123   ins_cost(INSN_COST * 2);
14124   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14125 
14126   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14127 
14128   ins_pipe(icmp_reg_imm);
14129 %}
14130 
14131 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14132 %{
14133   match(Set cr (CmpL op1 op2));
14134 
14135   effect(DEF cr, USE op1, USE op2);
14136 
14137   ins_cost(INSN_COST);
14138   format %{ "cmp  $op1, $op2" %}
14139 
14140   ins_encode(aarch64_enc_cmp(op1, op2));
14141 
14142   ins_pipe(icmp_reg_reg);
14143 %}
14144 
14145 instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
14146 %{
14147   match(Set cr (CmpL op1 zero));
14148 
14149   effect(DEF cr, USE op1);
14150 
14151   ins_cost(INSN_COST);
14152   format %{ "tst  $op1" %}
14153 
14154   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
14155 
14156   ins_pipe(icmp_reg_imm);
14157 %}
14158 
14159 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
14160 %{
14161   match(Set cr (CmpL op1 op2));
14162 
14163   effect(DEF cr, USE op1);
14164 
14165   ins_cost(INSN_COST);
14166   format %{ "cmp  $op1, $op2" %}
14167 
14168   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
14169 
14170   ins_pipe(icmp_reg_imm);
14171 %}
14172 
14173 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
14174 %{
14175   match(Set cr (CmpL op1 op2));
14176 
14177   effect(DEF cr, USE op1);
14178 
14179   ins_cost(INSN_COST * 2);
14180   format %{ "cmp  $op1, $op2" %}
14181 
14182   ins_encode(aarch64_enc_cmp_imm(op1, op2));
14183 
14184   ins_pipe(icmp_reg_imm);
14185 %}
14186 
14187 instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
14188 %{
14189   match(Set cr (CmpUL op1 op2));
14190 
14191   effect(DEF cr, USE op1, USE op2);
14192 
14193   ins_cost(INSN_COST);
14194   format %{ "cmp  $op1, $op2" %}
14195 
14196   ins_encode(aarch64_enc_cmp(op1, op2));
14197 
14198   ins_pipe(icmp_reg_reg);
14199 %}
14200 
14201 instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
14202 %{
14203   match(Set cr (CmpUL op1 zero));
14204 
14205   effect(DEF cr, USE op1);
14206 
14207   ins_cost(INSN_COST);
14208   format %{ "tst  $op1" %}
14209 
14210   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
14211 
14212   ins_pipe(icmp_reg_imm);
14213 %}
14214 
14215 instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
14216 %{
14217   match(Set cr (CmpUL op1 op2));
14218 
14219   effect(DEF cr, USE op1);
14220 
14221   ins_cost(INSN_COST);
14222   format %{ "cmp  $op1, $op2" %}
14223 
14224   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
14225 
14226   ins_pipe(icmp_reg_imm);
14227 %}
14228 
14229 instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
14230 %{
14231   match(Set cr (CmpUL op1 op2));
14232 
14233   effect(DEF cr, USE op1);
14234 
14235   ins_cost(INSN_COST * 2);
14236   format %{ "cmp  $op1, $op2" %}
14237 
14238   ins_encode(aarch64_enc_cmp_imm(op1, op2));
14239 
14240   ins_pipe(icmp_reg_imm);
14241 %}
14242 
14243 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
14244 %{
14245   match(Set cr (CmpP op1 op2));
14246 
14247   effect(DEF cr, USE op1, USE op2);
14248 
14249   ins_cost(INSN_COST);
14250   format %{ "cmp  $op1, $op2\t // ptr" %}
14251 
14252   ins_encode(aarch64_enc_cmpp(op1, op2));
14253 
14254   ins_pipe(icmp_reg_reg);
14255 %}
14256 
14257 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
14258 %{
14259   match(Set cr (CmpN op1 op2));
14260 
14261   effect(DEF cr, USE op1, USE op2);
14262 
14263   ins_cost(INSN_COST);
14264   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
14265 
14266   ins_encode(aarch64_enc_cmpn(op1, op2));
14267 
14268   ins_pipe(icmp_reg_reg);
14269 %}
14270 
14271 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
14272 %{
14273   match(Set cr (CmpP op1 zero));
14274 
14275   effect(DEF cr, USE op1, USE zero);
14276 
14277   ins_cost(INSN_COST);
14278   format %{ "cmp  $op1, 0\t // ptr" %}
14279 
14280   ins_encode(aarch64_enc_testp(op1));
14281 
14282   ins_pipe(icmp_reg_imm);
14283 %}
14284 
14285 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
14286 %{
14287   match(Set cr (CmpN op1 zero));
14288 
14289   effect(DEF cr, USE op1, USE zero);
14290 
14291   ins_cost(INSN_COST);
14292   format %{ "cmp  $op1, 0\t // compressed ptr" %}
14293 
14294   ins_encode(aarch64_enc_testn(op1));
14295 
14296   ins_pipe(icmp_reg_imm);
14297 %}
14298 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against constant zero (fcmps with literal 0.0 operand).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against constant zero.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14360 
// Three-way float compare (CmpF3): materialize -1 / 0 / +1 in $dst.
// fcmps sets flags; csinv installs 0 on EQ, else -1; csneg then keeps -1
// when LT (less or unordered) and negates it to +1 otherwise.
// Cleanups vs. previous revision: the format text had unbalanced
// parentheses ("csinvw($dst, ..., eq") and each encoding declared a
// Label 'done' that was bound but never branched to; both removed.
// Emitted instruction sequence is unchanged.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare (CmpD3): same -1 / 0 / +1 sequence as the
// float variant, using fcmpd.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against constant 0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against constant 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14468 
// CmpLTMask: $dst = (p < q) ? -1 : 0.  csetw yields 0/1, then the
// subtraction from zero turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: the sign-bit broadcast (asr #31) produces the
// -1/0 mask in a single instruction, no flags needed.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14505 
14506 // ============================================================================
14507 // Max and Min
14508 
14509 instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14510 %{
14511   effect( DEF dst, USE src1, USE src2, USE cr );
14512 
14513   ins_cost(INSN_COST * 2);
14514   format %{ "cselw $dst, $src1, $src2 lt\t"  %}
14515 
14516   ins_encode %{
14517     __ cselw(as_Register($dst$$reg),
14518              as_Register($src1$$reg),
14519              as_Register($src2$$reg),
14520              Assembler::LT);
14521   %}
14522 
14523   ins_pipe(icond_reg_reg);
14524 %}
14525 
14526 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
14527 %{
14528   match(Set dst (MinI src1 src2));
14529   ins_cost(INSN_COST * 3);
14530 
14531   expand %{
14532     rFlagsReg cr;
14533     compI_reg_reg(cr, src1, src2);
14534     cmovI_reg_reg_lt(dst, src1, src2, cr);
14535   %}
14536 
14537 %}
14538 // FROM HERE
14539 
14540 instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14541 %{
14542   effect( DEF dst, USE src1, USE src2, USE cr );
14543 
14544   ins_cost(INSN_COST * 2);
14545   format %{ "cselw $dst, $src1, $src2 gt\t"  %}
14546 
14547   ins_encode %{
14548     __ cselw(as_Register($dst$$reg),
14549              as_Register($src1$$reg),
14550              as_Register($src2$$reg),
14551              Assembler::GT);
14552   %}
14553 
14554   ins_pipe(icond_reg_reg);
14555 %}
14556 
14557 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
14558 %{
14559   match(Set dst (MaxI src1 src2));
14560   ins_cost(INSN_COST * 3);
14561   expand %{
14562     rFlagsReg cr;
14563     compI_reg_reg(cr, src1, src2);
14564     cmovI_reg_reg_gt(dst, src1, src2, cr);
14565   %}
14566 %}
14567 
14568 // ============================================================================
14569 // Branch Instructions
14570 
14571 // Direct Branch.
14572 instruct branch(label lbl)
14573 %{
14574   match(Goto);
14575 
14576   effect(USE lbl);
14577 
14578   ins_cost(BRANCH_COST);
14579   format %{ "b  $lbl" %}
14580 
14581   ins_encode(aarch64_enc_b(lbl));
14582 
14583   ins_pipe(pipe_branch);
14584 %}
14585 
14586 // Conditional Near Branch
14587 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
14588 %{
14589   // Same match rule as `branchConFar'.
14590   match(If cmp cr);
14591 
14592   effect(USE lbl);
14593 
14594   ins_cost(BRANCH_COST);
14595   // If set to 1 this indicates that the current instruction is a
14596   // short variant of a long branch. This avoids using this
14597   // instruction in first-pass matching. It will then only be used in
14598   // the `Shorten_branches' pass.
14599   // ins_short_branch(1);
14600   format %{ "b$cmp  $lbl" %}
14601 
14602   ins_encode(aarch64_enc_br_con(cmp, lbl));
14603 
14604   ins_pipe(pipe_branch_cond);
14605 %}
14606 
14607 // Conditional Near Branch Unsigned
14608 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
14609 %{
14610   // Same match rule as `branchConFar'.
14611   match(If cmp cr);
14612 
14613   effect(USE lbl);
14614 
14615   ins_cost(BRANCH_COST);
14616   // If set to 1 this indicates that the current instruction is a
14617   // short variant of a long branch. This avoids using this
14618   // instruction in first-pass matching. It will then only be used in
14619   // the `Shorten_branches' pass.
14620   // ins_short_branch(1);
14621   format %{ "b$cmp  $lbl\t# unsigned" %}
14622 
14623   ins_encode(aarch64_enc_br_conU(cmp, lbl));
14624 
14625   ins_pipe(pipe_branch_cond);
14626 %}
14627 
// Make use of CBZ and CBNZ.  These instructions, as well as being
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.

// Int compare-with-zero fused with its branch: EQ -> cbzw, NE -> cbnzw.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long compare-with-zero fused with its branch.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check fused with its branch.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compressed-pointer null-check fused with its branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-check of a DecodeN result: a narrow oop decodes to null iff the
// narrow value itself is zero, so test the undecoded register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare with zero fused with its branch.  For an unsigned
// value, EQ and LS (<= 0) both mean "is zero", so they map to cbzw; the
// remaining conditions mean "is non-zero" and map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare with zero fused with its branch (see above).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14750 
14751 // Test bit and Branch
14752 
14753 // Patterns for short (< 32KiB) variants
14754 instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
14755   match(If cmp (CmpL op1 op2));
14756   effect(USE labl);
14757 
14758   ins_cost(BRANCH_COST);
14759   format %{ "cb$cmp   $op1, $labl # long" %}
14760   ins_encode %{
14761     Label* L = $labl$$label;
14762     Assembler::Condition cond =
14763       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14764     __ tbr(cond, $op1$$Register, 63, *L);
14765   %}
14766   ins_pipe(pipe_cmp_branch);
14767   ins_short_branch(1);
14768 %}
14769 
14770 instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14771   match(If cmp (CmpI op1 op2));
14772   effect(USE labl);
14773 
14774   ins_cost(BRANCH_COST);
14775   format %{ "cb$cmp   $op1, $labl # int" %}
14776   ins_encode %{
14777     Label* L = $labl$$label;
14778     Assembler::Condition cond =
14779       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14780     __ tbr(cond, $op1$$Register, 31, *L);
14781   %}
14782   ins_pipe(pipe_cmp_branch);
14783   ins_short_branch(1);
14784 %}
14785 
// Branch on a single-bit test of a long: (op1 & (1 << k)) ==/!= 0 becomes one
// tbz/tbnz. The predicate restricts the AND mask to a power of two so
// exact_log2 yields the bit index. Short-branch variant (+/-32KiB reach).
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14802 
// Int variant of cmpL_branch_bit above: single-bit test via one tbz/tbnz.
// Mask must be a power of two (see predicate). Short-branch variant.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14819 
14820 // And far variants
// Far variant of cmpL_branch_sign: same sign test on bit 63, but the
// /*far*/true flag lets the macro assembler synthesize an inverted
// test-and-branch around an unconditional branch when the target is out of
// tbz/tbnz range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14835 
// Far variant of cmpI_branch_sign: sign test on bit 31 with long-range
// branch expansion (see far_cmpL_branch_sign).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14850 
// Far variant of cmpL_branch_bit: single-bit test of a long with long-range
// branch expansion. Mask must be a power of two (see predicate).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14866 
// Far variant of cmpI_branch_bit: single-bit test of an int with long-range
// branch expansion. Mask must be a power of two (see predicate).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14882 
14883 // Test bits
14884 
// Set flags from (op1 & imm) compared with zero, emitted as a single
// tst (ands with zr destination). The predicate requires the mask to be
// encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14897 
// Int variant of cmpL_and above: flags from (op1 & imm) vs zero via tstw.
// Mask must be encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14910 
// Register-register form: flags from (op1 & op2) vs zero via tst.
// No predicate needed; any register mask is legal.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14921 
// Int register-register form: flags from (op1 & op2) vs zero via tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14932 
14933 
14934 // Conditional Far Branch
14935 // Conditional Far Branch Unsigned
14936 // TODO: fixme
14937 
14938 // counted loop end branch near
// Conditional branch closing a counted loop (signed condition codes).
// Uses the shared aarch64_enc_br_con encoding; flags were set by the
// preceding compare.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14954 
14955 // counted loop end branch near Unsigned
// Unsigned-condition variant of branchLoopEnd (flags register is the
// unsigned-flavoured rFlagsRegU, encoding maps to unsigned cond codes).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14971 
14972 // counted loop end branch far
14973 // counted loop end branch far unsigned
14974 // TODO: fixme
14975 
14976 // ============================================================================
14977 // inlined locking and unlocking
14978 
// Inlined monitor enter: delegates to the aarch64_enc_fast_lock encoding,
// which leaves the success/failure outcome in the flags (cr). tmp and tmp2
// are scratch and are clobbered.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14993 
// Inlined monitor exit: counterpart of cmpFastLock above; outcome is left
// in the flags, tmp/tmp2 are clobbered scratch registers.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15006 
15007 
15008 // ============================================================================
15009 // Safepoint Instructions
15010 
15011 // TODO
15012 // provide a near and far version of this code
15013 
// Safepoint poll: a load from the polling page discarded into zr; the VM
// arms the page so the load faults when a safepoint is requested. The
// poll_type relocation lets the signal handler identify the poll site.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15027 
15028 
15029 // ============================================================================
15030 // Procedure Call/Return Instructions
15031 
15032 // Call Java Static Instruction
15033 
// Direct (statically-bound) Java call; the epilog encoding handles the
// post-call bookkeeping. Name is fixed by the matcher's CallStaticJava node.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15049 
15050 // TO HERE
15051 
15052 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache call); otherwise parallel
// to CallStaticJavaDirect above.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15068 
15069 // Call Runtime Instruction
15070 
// Call from compiled Java code into the VM runtime (full transition via
// aarch64_enc_java_to_runtime).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15085 
15086 // Call Runtime Instruction
15087 
// Leaf runtime call (callee performs no Java-visible actions such as GC or
// deoptimization); shares the java_to_runtime encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15102 
15103 // Call Runtime Instruction
15104 
// Leaf runtime call that additionally does not touch FP registers;
// same encoding as CallLeafDirect, distinct ideal node.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15119 
15120 // Tail Call; Jump from runtime stub to Java code.
15121 // Also known as an 'interprocedural jump'.
15122 // Target of jump will eventually return to caller.
15123 // TailJump below removes the return address.
// Interprocedural jump (tail call) from a runtime stub into Java code:
// an indirect br through jump_target; method_oop rides along in the
// inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15136 
// Tail jump used for exception forwarding: indirect br to jump_target with
// the exception oop pinned in r0 (see TailCalljmpInd note above about
// removing the return address).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15149 
15150 // Create exception oop: created by stack-crawling runtime code.
15151 // Created exception is now available to this handler, and is setup
15152 // just prior to jumping to this handler. No code emitted.
15153 // TODO check
15154 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-length pseudo-instruction: the stack-crawling runtime has already
// placed the exception oop in r0 before control reaches the handler, so
// this node only informs the register allocator of that fact.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15167 
15168 // Rethrow exception: The exception oop will come in the first
15169 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the shared rethrow stub; the exception oop
// is already in the first argument register per the calling convention.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15180 
15181 
15182 // Return Instruction
15183 // epilog node loads ret address into lr as part of frame pop
// Method return: plain ret through lr (the epilog has already popped the
// frame and restored the return address into lr).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15194 
15195 // Die now.
// Halt node: emit a trapping debug instruction so control can never fall
// through. The immediate is chosen to stay distinguishable from the
// not-entrant SIGILL marker (see comment in the encoding).
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15210 
15211 // ============================================================================
15212 // Partial Subtype Check
15213 //
15214 // superklass array for an instance of the superklass.  Set a hidden
15215 // internal cache on a hit (cache is checked with exposed code in
15216 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15217 // encoding ALSO sets flags.
15218 
// Partial subtype check returning a result register: zero in $result on a
// hit, non-zero on a miss (opcode 0x1 asks the encoding to zero the result
// register on a hit). Flags and temp are clobbered.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15233 
// Flags-only variant: matches the (PartialSubtypeCheck ... == 0) compare
// directly, so only the condition flags are needed and the result register
// is merely clobbered (opcode 0x0: don't bother zeroing it on a hit).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15248 
// String.compareTo intrinsic, UTF-16 vs UTF-16 (UU encoding). Inputs are
// pinned to the registers the stub expects; no vector temps are needed for
// this encoding pair (fnoreg placeholders).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15266 
// String.compareTo intrinsic, Latin-1 vs Latin-1 (LL encoding); parallel to
// string_compareU above.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15283 
// String.compareTo intrinsic for mixed encodings, UTF-16 vs Latin-1 (UL);
// needs three vector temps (v0-v2) for the inflation of the Latin-1 side.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15303 
// String.compareTo intrinsic for mixed encodings, Latin-1 vs UTF-16 (LU);
// mirror image of string_compareUL above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15323 
// String.indexOf intrinsic, UTF-16 needle in UTF-16 haystack (UU), with a
// runtime needle length (the -1 icnt2 argument tells the stub the count is
// in a register rather than a compile-time constant).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15344 
// String.indexOf intrinsic, Latin-1 needle in Latin-1 haystack (LL),
// runtime needle length; parallel to string_indexofUU above.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15365 
// String.indexOf intrinsic, mixed encodings (UL), runtime needle length;
// parallel to string_indexofUU above.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15386 
// String.indexOf with a small compile-time-constant needle length (<= 4),
// UU encoding: the constant count is passed as icnt2 and the register count
// slots are filled with zr, letting the stub use a specialized fast path.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15407 
// Constant-needle-length (<= 4) indexOf, LL encoding; parallel to
// string_indexof_conUU above.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15428 
// Constant-needle-length indexOf, UL encoding. Note the tighter immI_1
// operand: only a single-char needle is handled as a constant for the
// mixed-encoding case.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15449 
// indexOf of a single char value in a UTF-16 string (StrIndexOfChar node);
// delegates to the string_indexof_char stub.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15467 
// String.equals intrinsic for Latin-1 (LL): final argument 1 is the element
// size in bytes passed to the string_equals stub.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15483 
// String.equals intrinsic for UTF-16 (UU): element size 2 bytes; otherwise
// parallel to string_equalsL above.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15499 
// Arrays.equals intrinsic for byte[] (LL encoding): final argument 1 is the
// element size in bytes passed to the arrays_equals stub.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
15516 
// Arrays.equals intrinsic for char[] (UU encoding): element size 2 bytes;
// parallel to array_equalsB above.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15533 
// hasNegatives intrinsic: scan len bytes at ary1 for any byte with the high
// bit set (used by String coder checks); result computed by the stub.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15544 
15545 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// StrCompressedCopy intrinsic: compress UTF-16 chars at src into Latin-1
// bytes at dst using four vector temps; the stub reports the outcome in
// $result.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15563 
15564 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// StrInflatedCopy intrinsic: widen Latin-1 bytes at src to UTF-16 chars at
// dst. No value result (Universe dummy); everything is done via side
// effects on memory, with vector and GP temps clobbered.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15578 
15579 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// EncodeISOArray intrinsic: narrow UTF-16 chars to ISO-8859-1 bytes; the
// stub leaves its status/count in $result and clobbers four vector temps.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15597 
15598 // ============================================================================
15599 // This name is KNOWN by the ADLC and cannot be changed.
15600 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15601 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated thread
// register (thread_RegP), so this emits no code at all (size 0, cost 0).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  // Intentionally empty encoding: register constraint does all the work.
  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15616 
15617 // ====================VECTOR INSTRUCTIONS=====================================
15618 
15619 // Load vector (32 bits)
// Only fires for 4-byte vector loads; 8/16-byte loads match loadV8/loadV16.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15629 
15630 // Load vector (64 bits)
// 8-byte vector load into a D register (ldr of an S/D/Q sized FP register).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15640 
15641 // Load Vector (128 bits)
// 16-byte vector load into a full Q register.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15651 
15652 // Store Vector (32 bits)
// 4-byte vector store (low S lane of the source register).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15662 
15663 // Store Vector (64 bits)
// 8-byte vector store from a D register.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15673 
15674 // Store Vector (128 bits)
// 16-byte vector store from a full Q register.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15684 
// Broadcast a GP-register byte to all 8 byte lanes of a D register.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  // Length-4 byte vectors reuse the 8B form; presumably the upper lanes
  // are simply unused by consumers of the shorter vector.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15697 
// Broadcast a GP-register byte to all 16 byte lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15709 
// Broadcast an immediate byte to a 64-bit vector via movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Only the low 8 bits of the constant are meaningful for a byte lane.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15722 
// Broadcast an immediate byte to a 128-bit vector via movi.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    // Mask to the byte value actually replicated.
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15734 
// Broadcast a GP-register short to the 4 halfword lanes of a D register.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  // Length-2 short vectors share this form (low lanes only).
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15747 
// Broadcast a GP-register short to the 8 halfword lanes of a Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15759 
// Broadcast an immediate short to a 64-bit vector.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Mask to 16 bits: that is the halfword value replicated per lane.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15772 
// Broadcast an immediate short to a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15784 
// Broadcast a GP-register int to the 2 word lanes of a D register.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15796 
// Broadcast a GP-register int to the 4 word lanes of a Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15808 
// Broadcast an immediate int to a 64-bit vector; no masking needed since
// the lane width already matches the 32-bit constant.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15820 
// Broadcast an immediate int to a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15832 
// Broadcast a GP-register long to both doubleword lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15844 
// Zero a 128-bit vector by xor-ing it with itself (no constant load needed).
// NOTE(review): this matches (ReplicateI zero), not ReplicateL, and the format
// text says "(4I)" -- an all-zero 4I vector is bit-identical to an all-zero 2L
// vector, but confirm this match is intentional for the 2L case.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15858 
// Broadcast a float (FP register source) to both S lanes of a D register.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15871 
// Broadcast a float to all 4 S lanes of a Q register.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15884 
// Broadcast a double to both D lanes of a Q register.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15897 
15898 // ====================REDUCTION ARITHMETIC====================================
15899 
// Add-reduce a 2-lane int vector: dst = src1 + src2[0] + src2[1].
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    // Extract both 32-bit lanes into GP registers, then sum in the int domain.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15918 
// Add-reduce a 4-lane int vector using the SIMD across-lanes addv, then
// fold in the scalar src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    // addv sums all 4 lanes into lane 0 of tmp.
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15936 
// Multiply-reduce a 2-lane int vector: dst = src1 * src2[0] * src2[1].
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  // dst is TEMP as well: it is written before the last input is consumed.
  effect(TEMP tmp, TEMP dst);
  // Dropped a stray trailing "\n\t" after the last mnemonic, which printed a
  // dangling tab line in -XX:+PrintOptoAssembly output.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    // Extract each 32-bit lane into a GP register and multiply it in.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15955 
// Multiply-reduce a 4-lane int vector: pairwise-multiply the high half onto
// the low half with a vector mul, then finish the last two lanes in GP regs.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Dropped a stray trailing "\n\t" after the last mnemonic (dangling tab in
  // PrintOptoAssembly output).
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    // Move the high D lane of src2 down into tmp's low D lane...
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    // ...then tmp[0..1] = src2[2..3] * src2[0..1] in one 2S vector multiply.
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15980 
// Add-reduce a 2-lane float vector: dst = src1 + src2[0] + src2[1].
// Strictly ordered scalar fadds preserve Java's required FP semantics.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // ins copies lane 1 down to lane 0 so the scalar fadds can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16000 
// Add-reduce a 4-lane float vector, one lane at a time in order
// (lane 0, 1, 2, 3) to keep strict FP addition ordering.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Each ins moves the next source lane to lane 0 for the scalar add.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16032 
// Multiply-reduce a 2-lane float vector: dst = src1 * src2[0] * src2[1].
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Corrected format label: this is a 2-lane MULTIPLY reduction (the old text
  // said "add reduction4f", copied from the add variant).
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Bring lane 1 down to lane 0 so the scalar fmuls can consume it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16052 
// Multiply-reduce a 4-lane float vector, lane by lane in order to keep
// strict FP multiplication ordering.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Corrected format label: this is a MULTIPLY reduction (the old text said
  // "add reduction4f", copied from the add variant).
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Each ins moves the next source lane to lane 0 for the scalar multiply.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16084 
// Add-reduce a 2-lane double vector: dst = src1 + src2[0] + src2[1].
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Copy the high D lane down so the scalar faddd can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16104 
// Multiply-reduce a 2-lane double vector: dst = src1 * src2[0] * src2[1].
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Corrected format label: this is a MULTIPLY reduction (the old text said
  // "add reduction2d", copied from the add variant).
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Copy the high D lane down so the scalar fmuld can reach it.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16124 
// Max-reduce a 2-lane float vector. MaxReductionV is element-type agnostic,
// hence the explicit T_FLOAT predicate.
instruct reduce_max2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxs $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmaxs $dst, $dst, $tmp\t max reduction2F" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // Bring lane 1 down to lane 0 for the second scalar fmaxs.
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16140 
// Max-reduce a 4-lane float vector with the across-lanes fmaxv, then fold
// in the scalar src1.
instruct reduce_max4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fmaxv $dst, T4S, $src2\n\t"
            "fmaxs $dst, $dst, $src1\t max reduction4F" %}
  ins_encode %{
    __ fmaxv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmaxs(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16154 
// Max-reduce a 2-lane double vector (no across-lanes fmaxv exists for 2D,
// so the high lane is moved down and compared with a scalar fmaxd).
instruct reduce_max2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmaxd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmaxd $dst, $dst, $tmp\t max reduction2D" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmaxd(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16170 
// Min-reduce a 2-lane float vector; mirror image of reduce_max2F.
instruct reduce_min2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmins $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmins $dst, $dst, $tmp\t min reduction2F" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S, as_FloatRegister($src2$$reg), 0, 1);
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16186 
// Min-reduce a 4-lane float vector via the across-lanes fminv.
instruct reduce_min4F(vRegF dst, vRegF src1, vecX src2) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst);
  format %{ "fminv $dst, T4S, $src2\n\t"
            "fmins $dst, $dst, $src1\t min reduction4F" %}
  ins_encode %{
    __ fminv(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src2$$reg));
    __ fmins(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16200 
// Min-reduce a 2-lane double vector; mirror image of reduce_max2D.
instruct reduce_min2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) %{
  predicate(n->in(2)->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinReductionV src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP_DEF dst, TEMP tmp);
  format %{ "fmind $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmind $dst, $dst, $tmp\t min reduction2D" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D, as_FloatRegister($src2$$reg), 0, 1);
    __ fmind(as_FloatRegister($dst$$reg), as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16216 
16217 // ====================VECTOR ARITHMETIC=======================================
16218 
16219 // --------------------------------- ADD --------------------------------------
16220 
// Vector add, byte lanes, 64-bit form (also covers length-4 byte vectors).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16235 
// Vector add, 16 byte lanes, 128-bit form.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16249 
// Vector add, short lanes, 64-bit form (also covers length-2 short vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16264 
// Vector add, 8 short lanes, 128-bit form.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16278 
// Vector add, 2 int lanes, 64-bit form.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16292 
// Vector add, 4 int lanes, 128-bit form.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16306 
// Vector add, 2 long lanes, 128-bit form.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16320 
// Vector FP add, 2 float lanes, 64-bit form.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16334 
// Vector FP add, 4 float lanes, 128-bit form.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16348 
// Vector FP add, 2 double lanes, 128-bit form.
// NOTE(review): unlike its siblings, this rule has no length predicate --
// presumably AddVD is only ever generated with 2 lanes here; confirm.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16361 
16362 // --------------------------------- SUB --------------------------------------
16363 
// Vector subtract, byte lanes, 64-bit form (also covers length-4 vectors).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16378 
// Vector subtract, 16 byte lanes, 128-bit form.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16392 
// Vector subtract, short lanes, 64-bit form (also covers length-2 vectors).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16407 
// Vector subtract, 8 short lanes, 128-bit form.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16421 
// Vector subtract, 2 int lanes, 64-bit form.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16435 
// Vector subtract, 4 int lanes, 128-bit form.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16449 
16450 instruct vsub2L(vecX dst, vecX src1, vecX src2)
16451 %{
16452   predicate(n->as_Vector()->length() == 2);
16453   match(Set dst (SubVL src1 src2));
16454   ins_cost(INSN_COST);
16455   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
16456   ins_encode %{
16457     __ subv(as_FloatRegister($dst$$reg), __ T2D,
16458             as_FloatRegister($src1$$reg),
16459             as_FloatRegister($src2$$reg));
16460   %}
16461   ins_pipe(vdop128);
16462 %}
16463 
16464 instruct vsub2F(vecD dst, vecD src1, vecD src2)
16465 %{
16466   predicate(n->as_Vector()->length() == 2);
16467   match(Set dst (SubVF src1 src2));
16468   ins_cost(INSN_COST);
16469   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
16470   ins_encode %{
16471     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
16472             as_FloatRegister($src1$$reg),
16473             as_FloatRegister($src2$$reg));
16474   %}
16475   ins_pipe(vdop_fp64);
16476 %}
16477 
16478 instruct vsub4F(vecX dst, vecX src1, vecX src2)
16479 %{
16480   predicate(n->as_Vector()->length() == 4);
16481   match(Set dst (SubVF src1 src2));
16482   ins_cost(INSN_COST);
16483   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
16484   ins_encode %{
16485     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
16486             as_FloatRegister($src1$$reg),
16487             as_FloatRegister($src2$$reg));
16488   %}
16489   ins_pipe(vdop_fp128);
16490 %}
16491 
16492 instruct vsub2D(vecX dst, vecX src1, vecX src2)
16493 %{
16494   predicate(n->as_Vector()->length() == 2);
16495   match(Set dst (SubVD src1 src2));
16496   ins_cost(INSN_COST);
16497   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
16498   ins_encode %{
16499     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
16500             as_FloatRegister($src1$$reg),
16501             as_FloatRegister($src2$$reg));
16502   %}
16503   ins_pipe(vdop_fp128);
16504 %}
16505 
16506 // --------------------------------- MUL --------------------------------------
16507 
16508 instruct vmul4S(vecD dst, vecD src1, vecD src2)
16509 %{
16510   predicate(n->as_Vector()->length() == 2 ||
16511             n->as_Vector()->length() == 4);
16512   match(Set dst (MulVS src1 src2));
16513   ins_cost(INSN_COST);
16514   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
16515   ins_encode %{
16516     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
16517             as_FloatRegister($src1$$reg),
16518             as_FloatRegister($src2$$reg));
16519   %}
16520   ins_pipe(vmul64);
16521 %}
16522 
16523 instruct vmul8S(vecX dst, vecX src1, vecX src2)
16524 %{
16525   predicate(n->as_Vector()->length() == 8);
16526   match(Set dst (MulVS src1 src2));
16527   ins_cost(INSN_COST);
16528   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
16529   ins_encode %{
16530     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
16531             as_FloatRegister($src1$$reg),
16532             as_FloatRegister($src2$$reg));
16533   %}
16534   ins_pipe(vmul128);
16535 %}
16536 
16537 instruct vmul2I(vecD dst, vecD src1, vecD src2)
16538 %{
16539   predicate(n->as_Vector()->length() == 2);
16540   match(Set dst (MulVI src1 src2));
16541   ins_cost(INSN_COST);
16542   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
16543   ins_encode %{
16544     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
16545             as_FloatRegister($src1$$reg),
16546             as_FloatRegister($src2$$reg));
16547   %}
16548   ins_pipe(vmul64);
16549 %}
16550 
16551 instruct vmul4I(vecX dst, vecX src1, vecX src2)
16552 %{
16553   predicate(n->as_Vector()->length() == 4);
16554   match(Set dst (MulVI src1 src2));
16555   ins_cost(INSN_COST);
16556   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
16557   ins_encode %{
16558     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
16559             as_FloatRegister($src1$$reg),
16560             as_FloatRegister($src2$$reg));
16561   %}
16562   ins_pipe(vmul128);
16563 %}
16564 
16565 instruct vmul2F(vecD dst, vecD src1, vecD src2)
16566 %{
16567   predicate(n->as_Vector()->length() == 2);
16568   match(Set dst (MulVF src1 src2));
16569   ins_cost(INSN_COST);
16570   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
16571   ins_encode %{
16572     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
16573             as_FloatRegister($src1$$reg),
16574             as_FloatRegister($src2$$reg));
16575   %}
16576   ins_pipe(vmuldiv_fp64);
16577 %}
16578 
16579 instruct vmul4F(vecX dst, vecX src1, vecX src2)
16580 %{
16581   predicate(n->as_Vector()->length() == 4);
16582   match(Set dst (MulVF src1 src2));
16583   ins_cost(INSN_COST);
16584   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
16585   ins_encode %{
16586     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
16587             as_FloatRegister($src1$$reg),
16588             as_FloatRegister($src2$$reg));
16589   %}
16590   ins_pipe(vmuldiv_fp128);
16591 %}
16592 
16593 instruct vmul2D(vecX dst, vecX src1, vecX src2)
16594 %{
16595   predicate(n->as_Vector()->length() == 2);
16596   match(Set dst (MulVD src1 src2));
16597   ins_cost(INSN_COST);
16598   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
16599   ins_encode %{
16600     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
16601             as_FloatRegister($src1$$reg),
16602             as_FloatRegister($src2$$reg));
16603   %}
16604   ins_pipe(vmuldiv_fp128);
16605 %}
16606 
16607 // --------------------------------- MLA --------------------------------------
16608 
16609 instruct vmla4S(vecD dst, vecD src1, vecD src2)
16610 %{
16611   predicate(n->as_Vector()->length() == 2 ||
16612             n->as_Vector()->length() == 4);
16613   match(Set dst (AddVS dst (MulVS src1 src2)));
16614   ins_cost(INSN_COST);
16615   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
16616   ins_encode %{
16617     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
16618             as_FloatRegister($src1$$reg),
16619             as_FloatRegister($src2$$reg));
16620   %}
16621   ins_pipe(vmla64);
16622 %}
16623 
16624 instruct vmla8S(vecX dst, vecX src1, vecX src2)
16625 %{
16626   predicate(n->as_Vector()->length() == 8);
16627   match(Set dst (AddVS dst (MulVS src1 src2)));
16628   ins_cost(INSN_COST);
16629   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
16630   ins_encode %{
16631     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
16632             as_FloatRegister($src1$$reg),
16633             as_FloatRegister($src2$$reg));
16634   %}
16635   ins_pipe(vmla128);
16636 %}
16637 
16638 instruct vmla2I(vecD dst, vecD src1, vecD src2)
16639 %{
16640   predicate(n->as_Vector()->length() == 2);
16641   match(Set dst (AddVI dst (MulVI src1 src2)));
16642   ins_cost(INSN_COST);
16643   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
16644   ins_encode %{
16645     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
16646             as_FloatRegister($src1$$reg),
16647             as_FloatRegister($src2$$reg));
16648   %}
16649   ins_pipe(vmla64);
16650 %}
16651 
16652 instruct vmla4I(vecX dst, vecX src1, vecX src2)
16653 %{
16654   predicate(n->as_Vector()->length() == 4);
16655   match(Set dst (AddVI dst (MulVI src1 src2)));
16656   ins_cost(INSN_COST);
16657   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
16658   ins_encode %{
16659     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
16660             as_FloatRegister($src1$$reg),
16661             as_FloatRegister($src2$$reg));
16662   %}
16663   ins_pipe(vmla128);
16664 %}
16665 
16666 // dst + src1 * src2
16667 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
16668   predicate(UseFMA && n->as_Vector()->length() == 2);
16669   match(Set dst (FmaVF  dst (Binary src1 src2)));
16670   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
16671   ins_cost(INSN_COST);
16672   ins_encode %{
16673     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
16674             as_FloatRegister($src1$$reg),
16675             as_FloatRegister($src2$$reg));
16676   %}
16677   ins_pipe(vmuldiv_fp64);
16678 %}
16679 
16680 // dst + src1 * src2
16681 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
16682   predicate(UseFMA && n->as_Vector()->length() == 4);
16683   match(Set dst (FmaVF  dst (Binary src1 src2)));
16684   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
16685   ins_cost(INSN_COST);
16686   ins_encode %{
16687     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
16688             as_FloatRegister($src1$$reg),
16689             as_FloatRegister($src2$$reg));
16690   %}
16691   ins_pipe(vmuldiv_fp128);
16692 %}
16693 
16694 // dst + src1 * src2
16695 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
16696   predicate(UseFMA && n->as_Vector()->length() == 2);
16697   match(Set dst (FmaVD  dst (Binary src1 src2)));
16698   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
16699   ins_cost(INSN_COST);
16700   ins_encode %{
16701     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
16702             as_FloatRegister($src1$$reg),
16703             as_FloatRegister($src2$$reg));
16704   %}
16705   ins_pipe(vmuldiv_fp128);
16706 %}
16707 
16708 // --------------------------------- MLS --------------------------------------
16709 
16710 instruct vmls4S(vecD dst, vecD src1, vecD src2)
16711 %{
16712   predicate(n->as_Vector()->length() == 2 ||
16713             n->as_Vector()->length() == 4);
16714   match(Set dst (SubVS dst (MulVS src1 src2)));
16715   ins_cost(INSN_COST);
16716   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
16717   ins_encode %{
16718     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
16719             as_FloatRegister($src1$$reg),
16720             as_FloatRegister($src2$$reg));
16721   %}
16722   ins_pipe(vmla64);
16723 %}
16724 
16725 instruct vmls8S(vecX dst, vecX src1, vecX src2)
16726 %{
16727   predicate(n->as_Vector()->length() == 8);
16728   match(Set dst (SubVS dst (MulVS src1 src2)));
16729   ins_cost(INSN_COST);
16730   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
16731   ins_encode %{
16732     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
16733             as_FloatRegister($src1$$reg),
16734             as_FloatRegister($src2$$reg));
16735   %}
16736   ins_pipe(vmla128);
16737 %}
16738 
16739 instruct vmls2I(vecD dst, vecD src1, vecD src2)
16740 %{
16741   predicate(n->as_Vector()->length() == 2);
16742   match(Set dst (SubVI dst (MulVI src1 src2)));
16743   ins_cost(INSN_COST);
16744   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
16745   ins_encode %{
16746     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
16747             as_FloatRegister($src1$$reg),
16748             as_FloatRegister($src2$$reg));
16749   %}
16750   ins_pipe(vmla64);
16751 %}
16752 
16753 instruct vmls4I(vecX dst, vecX src1, vecX src2)
16754 %{
16755   predicate(n->as_Vector()->length() == 4);
16756   match(Set dst (SubVI dst (MulVI src1 src2)));
16757   ins_cost(INSN_COST);
16758   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
16759   ins_encode %{
16760     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
16761             as_FloatRegister($src1$$reg),
16762             as_FloatRegister($src2$$reg));
16763   %}
16764   ins_pipe(vmla128);
16765 %}
16766 
16767 // dst - src1 * src2
16768 instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
16769   predicate(UseFMA && n->as_Vector()->length() == 2);
16770   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
16771   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
16772   format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
16773   ins_cost(INSN_COST);
16774   ins_encode %{
16775     __ fmls(as_FloatRegister($dst$$reg), __ T2S,
16776             as_FloatRegister($src1$$reg),
16777             as_FloatRegister($src2$$reg));
16778   %}
16779   ins_pipe(vmuldiv_fp64);
16780 %}
16781 
16782 // dst - src1 * src2
16783 instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
16784   predicate(UseFMA && n->as_Vector()->length() == 4);
16785   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
16786   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
16787   format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
16788   ins_cost(INSN_COST);
16789   ins_encode %{
16790     __ fmls(as_FloatRegister($dst$$reg), __ T4S,
16791             as_FloatRegister($src1$$reg),
16792             as_FloatRegister($src2$$reg));
16793   %}
16794   ins_pipe(vmuldiv_fp128);
16795 %}
16796 
16797 // dst - src1 * src2
16798 instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
16799   predicate(UseFMA && n->as_Vector()->length() == 2);
16800   match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
16801   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
16802   format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
16803   ins_cost(INSN_COST);
16804   ins_encode %{
16805     __ fmls(as_FloatRegister($dst$$reg), __ T2D,
16806             as_FloatRegister($src1$$reg),
16807             as_FloatRegister($src2$$reg));
16808   %}
16809   ins_pipe(vmuldiv_fp128);
16810 %}
16811 
16812 // --------------------------------- DIV --------------------------------------
16813 
16814 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
16815 %{
16816   predicate(n->as_Vector()->length() == 2);
16817   match(Set dst (DivVF src1 src2));
16818   ins_cost(INSN_COST);
16819   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
16820   ins_encode %{
16821     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
16822             as_FloatRegister($src1$$reg),
16823             as_FloatRegister($src2$$reg));
16824   %}
16825   ins_pipe(vmuldiv_fp64);
16826 %}
16827 
16828 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
16829 %{
16830   predicate(n->as_Vector()->length() == 4);
16831   match(Set dst (DivVF src1 src2));
16832   ins_cost(INSN_COST);
16833   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
16834   ins_encode %{
16835     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
16836             as_FloatRegister($src1$$reg),
16837             as_FloatRegister($src2$$reg));
16838   %}
16839   ins_pipe(vmuldiv_fp128);
16840 %}
16841 
16842 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16843 %{
16844   predicate(n->as_Vector()->length() == 2);
16845   match(Set dst (DivVD src1 src2));
16846   ins_cost(INSN_COST);
16847   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16848   ins_encode %{
16849     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16850             as_FloatRegister($src1$$reg),
16851             as_FloatRegister($src2$$reg));
16852   %}
16853   ins_pipe(vmuldiv_fp128);
16854 %}
16855 
16856 // --------------------------------- SQRT -------------------------------------
16857 
16858 instruct vsqrt2D(vecX dst, vecX src)
16859 %{
16860   predicate(n->as_Vector()->length() == 2);
16861   match(Set dst (SqrtVD src));
16862   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
16863   ins_encode %{
16864     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
16865              as_FloatRegister($src$$reg));
16866   %}
16867   ins_pipe(vsqrt_fp128);
16868 %}
16869 
16870 // --------------------------------- ABS --------------------------------------
16871 
16872 instruct vabs2F(vecD dst, vecD src)
16873 %{
16874   predicate(n->as_Vector()->length() == 2);
16875   match(Set dst (AbsVF src));
16876   ins_cost(INSN_COST * 3);
16877   format %{ "fabs  $dst,$src\t# vector (2S)" %}
16878   ins_encode %{
16879     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
16880             as_FloatRegister($src$$reg));
16881   %}
16882   ins_pipe(vunop_fp64);
16883 %}
16884 
16885 instruct vabs4F(vecX dst, vecX src)
16886 %{
16887   predicate(n->as_Vector()->length() == 4);
16888   match(Set dst (AbsVF src));
16889   ins_cost(INSN_COST * 3);
16890   format %{ "fabs  $dst,$src\t# vector (4S)" %}
16891   ins_encode %{
16892     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
16893             as_FloatRegister($src$$reg));
16894   %}
16895   ins_pipe(vunop_fp128);
16896 %}
16897 
16898 instruct vabs2D(vecX dst, vecX src)
16899 %{
16900   predicate(n->as_Vector()->length() == 2);
16901   match(Set dst (AbsVD src));
16902   ins_cost(INSN_COST * 3);
16903   format %{ "fabs  $dst,$src\t# vector (2D)" %}
16904   ins_encode %{
16905     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
16906             as_FloatRegister($src$$reg));
16907   %}
16908   ins_pipe(vunop_fp128);
16909 %}
16910 
16911 // --------------------------------- NEG --------------------------------------
16912 
16913 instruct vneg2F(vecD dst, vecD src)
16914 %{
16915   predicate(n->as_Vector()->length() == 2);
16916   match(Set dst (NegVF src));
16917   ins_cost(INSN_COST * 3);
16918   format %{ "fneg  $dst,$src\t# vector (2S)" %}
16919   ins_encode %{
16920     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
16921             as_FloatRegister($src$$reg));
16922   %}
16923   ins_pipe(vunop_fp64);
16924 %}
16925 
16926 instruct vneg4F(vecX dst, vecX src)
16927 %{
16928   predicate(n->as_Vector()->length() == 4);
16929   match(Set dst (NegVF src));
16930   ins_cost(INSN_COST * 3);
16931   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16932   ins_encode %{
16933     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16934             as_FloatRegister($src$$reg));
16935   %}
16936   ins_pipe(vunop_fp128);
16937 %}
16938 
16939 instruct vneg2D(vecX dst, vecX src)
16940 %{
16941   predicate(n->as_Vector()->length() == 2);
16942   match(Set dst (NegVD src));
16943   ins_cost(INSN_COST * 3);
16944   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16945   ins_encode %{
16946     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16947             as_FloatRegister($src$$reg));
16948   %}
16949   ins_pipe(vunop_fp128);
16950 %}
16951 
16952 // --------------------------------- AND --------------------------------------
16953 
16954 instruct vand8B(vecD dst, vecD src1, vecD src2)
16955 %{
16956   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16957             n->as_Vector()->length_in_bytes() == 8);
16958   match(Set dst (AndV src1 src2));
16959   ins_cost(INSN_COST);
16960   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16961   ins_encode %{
16962     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16963             as_FloatRegister($src1$$reg),
16964             as_FloatRegister($src2$$reg));
16965   %}
16966   ins_pipe(vlogical64);
16967 %}
16968 
16969 instruct vand16B(vecX dst, vecX src1, vecX src2)
16970 %{
16971   predicate(n->as_Vector()->length_in_bytes() == 16);
16972   match(Set dst (AndV src1 src2));
16973   ins_cost(INSN_COST);
16974   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16975   ins_encode %{
16976     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16977             as_FloatRegister($src1$$reg),
16978             as_FloatRegister($src2$$reg));
16979   %}
16980   ins_pipe(vlogical128);
16981 %}
16982 
16983 // --------------------------------- OR ---------------------------------------
16984 
// Bitwise OR, 64-bit vector (also covers 4-byte vectors). The format string
// previously said "and" — a copy-paste from vand8B — although the rule
// matches OrV and emits orr; fixed to match vor16B and the actual encoding.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16999 
// Bitwise OR, 128-bit vector.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17013 
17014 // --------------------------------- XOR --------------------------------------
17015 
17016 instruct vxor8B(vecD dst, vecD src1, vecD src2)
17017 %{
17018   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17019             n->as_Vector()->length_in_bytes() == 8);
17020   match(Set dst (XorV src1 src2));
17021   ins_cost(INSN_COST);
17022   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
17023   ins_encode %{
17024     __ eor(as_FloatRegister($dst$$reg), __ T8B,
17025             as_FloatRegister($src1$$reg),
17026             as_FloatRegister($src2$$reg));
17027   %}
17028   ins_pipe(vlogical64);
17029 %}
17030 
17031 instruct vxor16B(vecX dst, vecX src1, vecX src2)
17032 %{
17033   predicate(n->as_Vector()->length_in_bytes() == 16);
17034   match(Set dst (XorV src1 src2));
17035   ins_cost(INSN_COST);
17036   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
17037   ins_encode %{
17038     __ eor(as_FloatRegister($dst$$reg), __ T16B,
17039             as_FloatRegister($src1$$reg),
17040             as_FloatRegister($src2$$reg));
17041   %}
17042   ins_pipe(vlogical128);
17043 %}
17044 
17045 // ------------------------------ Shift ---------------------------------------
17046 instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
17047   predicate(n->as_Vector()->length_in_bytes() == 8);
17048   match(Set dst (LShiftCntV cnt));
17049   match(Set dst (RShiftCntV cnt));
17050   format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
17051   ins_encode %{
17052     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
17053   %}
17054   ins_pipe(vdup_reg_reg64);
17055 %}
17056 
17057 instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
17058   predicate(n->as_Vector()->length_in_bytes() == 16);
17059   match(Set dst (LShiftCntV cnt));
17060   match(Set dst (RShiftCntV cnt));
17061   format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
17062   ins_encode %{
17063     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17064   %}
17065   ins_pipe(vdup_reg_reg128);
17066 %}
17067 
17068 instruct vsll8B(vecD dst, vecD src, vecD shift) %{
17069   predicate(n->as_Vector()->length() == 4 ||
17070             n->as_Vector()->length() == 8);
17071   match(Set dst (LShiftVB src shift));
17072   ins_cost(INSN_COST);
17073   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
17074   ins_encode %{
17075     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
17076             as_FloatRegister($src$$reg),
17077             as_FloatRegister($shift$$reg));
17078   %}
17079   ins_pipe(vshift64);
17080 %}
17081 
17082 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
17083   predicate(n->as_Vector()->length() == 16);
17084   match(Set dst (LShiftVB src shift));
17085   ins_cost(INSN_COST);
17086   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
17087   ins_encode %{
17088     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
17089             as_FloatRegister($src$$reg),
17090             as_FloatRegister($shift$$reg));
17091   %}
17092   ins_pipe(vshift128);
17093 %}
17094 
17095 // Right shifts with vector shift count on aarch64 SIMD are implemented
17096 // as left shift by negative shift count.
17097 // There are two cases for vector shift count.
17098 //
17099 // Case 1: The vector shift count is from replication.
17100 //        |            |
17101 //    LoadVector  RShiftCntV
17102 //        |       /
17103 //     RShiftVI
// Note: In the inner loop, multiple neg instructions are used; they can be
// moved to the outer loop and merged into one neg instruction.
17106 //
17107 // Case 2: The vector shift count is from loading.
17108 // This case isn't supported by middle-end now. But it's supported by
17109 // panama/vectorIntrinsics(JEP 338: Vector API).
17110 //        |            |
17111 //    LoadVector  LoadVector
17112 //        |       /
17113 //     RShiftVI
17114 //
17115 
// Variable right shifts of byte lanes: negate the broadcast shift count into
// a TEMP, then use sshl (arithmetic) or ushl (logical) with the negative
// count — see the comment block above for why right shift is encoded as a
// left shift by a negative amount.
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17185 
// Immediate shifts of byte lanes. Java shift semantics for oversized counts
// are handled explicitly: a left or logical-right shift by >= 8 yields all
// zeros (materialized as eor dst,src,src), while an arithmetic right shift
// saturates the count at 7 so the sign bit fills the lane.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zero.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zero.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift saturates at 7: lanes become all sign bits.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift saturates at 7: lanes become all sign bits.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zero.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift count >= lane width: result is all zero.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17292 
17293 instruct vsll4S(vecD dst, vecD src, vecD shift) %{
17294   predicate(n->as_Vector()->length() == 2 ||
17295             n->as_Vector()->length() == 4);
17296   match(Set dst (LShiftVS src shift));
17297   ins_cost(INSN_COST);
17298   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
17299   ins_encode %{
17300     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
17301             as_FloatRegister($src$$reg),
17302             as_FloatRegister($shift$$reg));
17303   %}
17304   ins_pipe(vshift64);
17305 %}
17306 
17307 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
17308   predicate(n->as_Vector()->length() == 8);
17309   match(Set dst (LShiftVS src shift));
17310   ins_cost(INSN_COST);
17311   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
17312   ins_encode %{
17313     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
17314             as_FloatRegister($src$$reg),
17315             as_FloatRegister($shift$$reg));
17316   %}
17317   ins_pipe(vshift128);
17318 %}
17319 
// Arithmetic right shift of short lanes by a per-lane register count.
// NEON has no variable right-shift instruction, so the count is negated
// (negr) and sshl is used: SSHL shifts right when its per-lane count is
// negative.  A TEMP register holds the negated counts.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17337 
// Arithmetic right shift of 8 short lanes by a per-lane register count
// (128-bit form): negate counts, then sshl (right shift for negative
// counts).
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17354 
// Logical right shift of short lanes by a per-lane register count:
// negate the counts, then ushl (USHL shifts right for negative counts).
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17372 
// Logical right shift of 8 short lanes by a per-lane register count
// (128-bit form of vsrl4S above).
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17389 
// Shift-left of short lanes by an immediate, 64-bit case.  A constant
// count >= the 16-bit element width zeroes every lane, emitted as
// eor(dst, src, src) since shl cannot encode such a count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Count >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17409 
// Shift-left of 8 short lanes by an immediate (128-bit form).  Counts
// >= 16 zero the result via eor, as in vsll4S_imm.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17428 
// Arithmetic right shift of short lanes by an immediate.  The count is
// clamped to 15 (element width - 1): shifting a 16-bit lane right by 15
// already fills it with the sign bit, so the clamp preserves semantics
// for any larger constant.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17443 
// Arithmetic right shift of 8 short lanes by an immediate (128-bit
// form); count clamped to 15 as in vsra4S_imm.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17457 
// Logical right shift of short lanes by an immediate.  A count >= the
// 16-bit element width yields all-zero lanes, emitted as eor.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17477 
// Logical right shift of 8 short lanes by an immediate (128-bit form);
// counts >= 16 zero the result via eor, as in vsrl4S_imm.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17496 
// Vector shift-left of 2 int lanes by a per-lane register count.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17509 
// Vector shift-left of 4 int lanes by a per-lane register count
// (128-bit form).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17522 
// Arithmetic right shift of 2 int lanes by a per-lane register count:
// negate the counts (negr into TEMP), then sshl, which shifts right for
// negative per-lane counts.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17539 
// Arithmetic right shift of 4 int lanes by a per-lane register count
// (128-bit form): negate counts, then sshl.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17556 
// Logical right shift of 2 int lanes by a per-lane register count:
// negate counts, then ushl (right shift for negative counts).
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
17573 
// Logical right shift of 4 int lanes by a per-lane register count
// (128-bit form): negate counts, then ushl.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17590 
// Shift-left of 2 int lanes by an immediate.  No out-of-range handling
// here, unlike the byte/short variants — presumably the constant is
// always in [0, 31] for 32-bit elements; confirm against the matcher.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17603 
// Shift-left of 4 int lanes by an immediate (128-bit form).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17616 
// Arithmetic right shift of 2 int lanes by an immediate.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17629 
// Arithmetic right shift of 4 int lanes by an immediate (128-bit form).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17642 
// Logical right shift of 2 int lanes by an immediate.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17655 
// Logical right shift of 4 int lanes by an immediate (128-bit form).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17668 
// Vector shift-left of 2 long lanes by a per-lane register count.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17681 
// Arithmetic right shift of 2 long lanes by a per-lane register count:
// negate counts into TEMP, then sshl (right shift for negative counts).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17698 
// Logical right shift of 2 long lanes by a per-lane register count:
// negate counts, then ushl.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17715 
// Shift-left of 2 long lanes by an immediate.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17728 
// Arithmetic right shift of 2 long lanes by an immediate.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17741 
// Logical right shift of 2 long lanes by an immediate.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17754 
// Element-wise maximum of 2 float lanes (NEON fmax, 64-bit form).
instruct vmax2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17768 
// Element-wise maximum of 4 float lanes (NEON fmax, 128-bit form).
instruct vmax4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17782 
// Element-wise maximum of 2 double lanes (NEON fmax, 128-bit form).
instruct vmax2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MaxV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmax  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmax(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17796 
// Element-wise minimum of 2 float lanes (NEON fmin, 64-bit form).
instruct vmin2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2F)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
17810 
// Element-wise minimum of 4 float lanes (NEON fmin, 128-bit form).
instruct vmin4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4 && n->bottom_type()->is_vect()->element_basic_type() == T_FLOAT);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17824 
// Element-wise minimum of 2 double lanes (NEON fmin, 128-bit form).
instruct vmin2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
  match(Set dst (MinV src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmin  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmin(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17838 
17839 //----------PEEPHOLE RULES-----------------------------------------------------
17840 // These must follow all instruction definitions as they use the names
17841 // defined in the instructions definitions.
17842 //
17843 // peepmatch ( root_instr_name [preceding_instruction]* );
17844 //
17845 // peepconstraint %{
17846 // (instruction_number.operand_name relational_op instruction_number.operand_name
17847 //  [, ...] );
17848 // // instruction numbers are zero-based using left to right order in peepmatch
17849 //
17850 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17851 // // provide an instruction_number.operand_name for each operand that appears
17852 // // in the replacement instruction's match rule
17853 //
17854 // ---------VM FLAGS---------------------------------------------------------
17855 //
17856 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17857 //
17858 // Each peephole rule is given an identifying number starting with zero and
17859 // increasing by one in the order seen by the parser.  An individual peephole
17860 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17861 // on the command-line.
17862 //
17863 // ---------CURRENT LIMITATIONS----------------------------------------------
17864 //
17865 // Only match adjacent instructions in same basic block
17866 // Only equality constraints
17867 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17868 // Only one replacement instruction
17869 //
17870 // ---------EXAMPLE----------------------------------------------------------
17871 //
17872 // // pertinent parts of existing instructions in architecture description
17873 // instruct movI(iRegINoSp dst, iRegI src)
17874 // %{
17875 //   match(Set dst (CopyI src));
17876 // %}
17877 //
17878 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17879 // %{
17880 //   match(Set dst (AddI dst src));
17881 //   effect(KILL cr);
17882 // %}
17883 //
17884 // // Change (inc mov) to lea
17885 // peephole %{
//   // increment preceded by register-register move
17887 //   peepmatch ( incI_iReg movI );
17888 //   // require that the destination register of the increment
17889 //   // match the destination register of the move
17890 //   peepconstraint ( 0.dst == 1.dst );
17891 //   // construct a replacement instruction that sets
17892 //   // the destination to ( move's source register + one )
17893 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17894 // %}
17895 //
17896 
17897 // Implementation no longer uses movX instructions since
17898 // machine-independent system no longer uses CopyX nodes.
17899 //
17900 // peephole
17901 // %{
17902 //   peepmatch (incI_iReg movI);
17903 //   peepconstraint (0.dst == 1.dst);
17904 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17905 // %}
17906 
17907 // peephole
17908 // %{
17909 //   peepmatch (decI_iReg movI);
17910 //   peepconstraint (0.dst == 1.dst);
17911 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17912 // %}
17913 
17914 // peephole
17915 // %{
17916 //   peepmatch (addI_iReg_imm movI);
17917 //   peepconstraint (0.dst == 1.dst);
17918 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17919 // %}
17920 
17921 // peephole
17922 // %{
17923 //   peepmatch (incL_iReg movL);
17924 //   peepconstraint (0.dst == 1.dst);
17925 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17926 // %}
17927 
17928 // peephole
17929 // %{
17930 //   peepmatch (decL_iReg movL);
17931 //   peepconstraint (0.dst == 1.dst);
17932 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17933 // %}
17934 
17935 // peephole
17936 // %{
17937 //   peepmatch (addL_iReg_imm movL);
17938 //   peepconstraint (0.dst == 1.dst);
17939 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17940 // %}
17941 
17942 // peephole
17943 // %{
17944 //   peepmatch (addP_iReg_imm movP);
17945 //   peepconstraint (0.dst == 1.dst);
17946 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17947 // %}
17948 
17949 // // Change load of spilled value to only a spill
17950 // instruct storeI(memory mem, iRegI src)
17951 // %{
17952 //   match(Set mem (StoreI mem src));
17953 // %}
17954 //
17955 // instruct loadI(iRegINoSp dst, memory mem)
17956 // %{
17957 //   match(Set dst (LoadI mem));
17958 // %}
17959 //
17960 
17961 //----------SMARTSPILL RULES---------------------------------------------------
17962 // These must follow all instruction definitions as they use the names
17963 // defined in the instructions definitions.
17964 
17965 // Local Variables:
17966 // mode: c++
17967 // End: