1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
   31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
  155 // AArch64 has 32 floating-point registers. Each is 128 bits wide and
  156 // can store a vector of single or double precision floating-point
  157 // values: up to 4 * 32 bit floats or 2 * 64 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
  160 // For Java use, float registers v0-v15 are always save-on-call (even
  161 // though the platform ABI treats v8-v15 as callee save); float registers
  162 // v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
  328 // The AArch64 CPSR status flag register is not directly accessible as
  329 // an instruction operand. The FPSR status flag register is a system
  330 // register which can be written/read using MSR/MRS but again does not
  331 // appear as an operand (a code identifying the FPSR occurs as an
  332 // immediate value in the instruction).
 333 
// Pseudo-definition for the flags register: encoding 32 (past the 0-31
// general registers) and no concrete VMReg mapping (VMRegImpl::Bad()),
// since the flags never appear as a normal instruction operand.
  334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
// The flags register lives in its own allocation chunk, separate from the
// integer (chunk0) and float/vector (chunk1) register files.
  427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
  432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
  434 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
  435 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
// Dynamic class: picks the _no_fp or _with_fp variant depending on the
// PreserveFramePointer flag, so that R29 (fp) is only available to the
// allocator when frames are not required to keep a frame pointer.
  585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
// Dynamic class (64-bit counterpart of no_special_reg32): selects between
// the _no_fp and _with_fp variants via PreserveFramePointer, keeping
// R29/R29_H (fp) out of allocation when the frame pointer must be preserved.
  654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// n.b. a 64 bit vector occupies the same two-slot V<n>, V<n>_H pair
// as a double (see double_reg above).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// each 128 bit register occupies four allocator slots
// (V<n>, V<n>_H, V<n>_J, V<n>_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots are listed although the
// comment says 128 bit (vectorx_reg uses four slots per register) --
// confirm this is how fixed v0 operands are meant to be sized.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only the V1/V1_H slots are listed although the
// comment says 128 bit (vectorx_reg uses four slots per register) --
// confirm this is how fixed v1 operands are meant to be sized.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only the V2/V2_H slots are listed although the
// comment says 128 bit (vectorx_reg uses four slots per register) --
// confirm this is how fixed v2 operands are meant to be sized.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only the V3/V3_H slots are listed although the
// comment says 128 bit (vectorx_reg uses four slots per register) --
// confirm this is how fixed v3 operands are meant to be sized.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the one flags register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls cost twice a register op
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references are an order of magnitude more expensive
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
1004 class CallStubImpl {
1005 
1006   //--------------------------------------------------------------
1007   //---<  Used for optimization in Compile::shorten_branches  >---
1008   //--------------------------------------------------------------
1009 
1010  public:
1011   // Size of call trampoline stub.
1012   static uint size_call_trampoline() {
1013     return 0; // no call trampolines on this platform
1014   }
1015 
1016   // number of relocations needed by a call trampoline stub
1017   static uint reloc_call_trampoline() {
1018     return 0; // no call trampolines on this platform
1019   }
1020 };
1021 
class HandlerImpl {

 public:

  // Emit the exception and deopt handler stubs into cbuf; each
  // returns the handler's offset (definitions live elsewhere in the
  // generated ad code).
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is just a far branch to the handler blob
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
  // Predicate declarations shared by the instruction rules below;
  // they choose between plain ldr/str + dmb encodings and the
  // ldar/stlr (acquire/release) encodings.
  bool is_CAS(int opcode);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
  // Optimization of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
1134   // sequences which i) occur as a translation of a volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
1262   // is_CAS(int opcode)
1263   //
1264   // return true if opcode is one of the possible CompareAndSwapX
1265   // values otherwise false.
1266 
1267   bool is_CAS(int opcode)
1268   {
1269     switch(opcode) {
1270       // We handle these
1271     case Op_CompareAndSwapI:
1272     case Op_CompareAndSwapL:
1273     case Op_CompareAndSwapP:
1274     case Op_CompareAndSwapN:
1275  // case Op_CompareAndSwapB:
1276  // case Op_CompareAndSwapS:
1277       return true;
1278       // These are TBD
1279     case Op_WeakCompareAndSwapB:
1280     case Op_WeakCompareAndSwapS:
1281     case Op_WeakCompareAndSwapI:
1282     case Op_WeakCompareAndSwapL:
1283     case Op_WeakCompareAndSwapP:
1284     case Op_WeakCompareAndSwapN:
1285     case Op_CompareAndExchangeB:
1286     case Op_CompareAndExchangeS:
1287     case Op_CompareAndExchangeI:
1288     case Op_CompareAndExchangeL:
1289     case Op_CompareAndExchangeP:
1290     case Op_CompareAndExchangeN:
1291       return false;
1292     default:
1293       return false;
1294     }
1295   }
1296 
1297   // helper to determine the maximum number of Phi nodes we may need to
1298   // traverse when searching from a card mark membar for the merge mem
1299   // feeding a trailing membar or vice versa
1300 
// predicates controlling emit of ldr<x>/ldar<x> and associated dmb

// Returns true when the acquire membar can be elided because the
// load (or CAS) it trails will itself be emitted with acquire
// semantics (ldar<x> / ldaxr<x>), making the dmb redundant.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // trailing membar of a volatile-load signature: the load will be
  // emitted as ldar<x>, so the dmb is unnecessary
  if (mb->trailing_load()) {
    return true;
  }

  // trailing membar of a load-store: elide only for the strong
  // CompareAndSwapX opcodes handled by is_CAS (emitted with ldaxr<x>)
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode());
  }

  return false;
}
1326 
1327 bool needs_acquiring_load(const Node *n)
1328 {
1329   assert(n->is_Load(), "expecting a load");
1330   if (UseBarriersForVolatile) {
1331     // we use a normal load and a dmb
1332     return false;
1333   }
1334 
1335   LoadNode *ld = n->as_Load();
1336 
1337   return ld->is_acquire();
1338 }
1339 
1340 bool unnecessary_release(const Node *n)
1341 {
1342   assert((n->is_MemBar() &&
1343           n->Opcode() == Op_MemBarRelease),
1344          "expecting a release membar");
1345 
1346   if (UseBarriersForVolatile) {
1347     // we need to plant a dmb
1348     return false;
1349   }
1350 
1351   MemBarNode *barrier = n->as_MemBar();
1352   if (!barrier->leading()) {
1353     return false;
1354   } else {
1355     Node* trailing = barrier->trailing_membar();
1356     MemBarNode* trailing_mb = trailing->as_MemBar();
1357     assert(trailing_mb->trailing(), "Not a trailing membar?");
1358     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1359 
1360     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1361     if (mem->is_Store()) {
1362       assert(mem->as_Store()->is_release(), "");
1363       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1364       return true;
1365     } else {
1366       assert(mem->is_LoadStore(), "");
1367       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1368       return is_CAS(mem->Opcode());
1369     }
1370   }
1371   return false;
1372 }
1373 
// Returns true when a MemBarVolatile can be elided because it is the
// trailing membar of a volatile-store signature whose store will be
// emitted as a releasing stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // elidable only when recognised as trailing a releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // cross-check the recorded leading/trailing membar pairing
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1397 
1398 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1399 
1400 bool needs_releasing_store(const Node *n)
1401 {
1402   // assert n->is_Store();
1403   if (UseBarriersForVolatile) {
1404     // we use a normal store and dmb combination
1405     return false;
1406   }
1407 
1408   StoreNode *st = n->as_Store();
1409 
1410   return st->trailing_membar() != NULL;
1411 }
1412 
1413 // predicate controlling translation of CAS
1414 //
1415 // returns true if CAS needs to use an acquiring load otherwise false
1416 
1417 bool needs_acquiring_load_exclusive(const Node *n)
1418 {
1419   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
1420   if (UseBarriersForVolatile) {
1421     return false;
1422   }
1423 
1424   LoadStoreNode* ldst = n->as_LoadStore();
1425   assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1426 
1427   // so we can just return true here
1428   return true;
1429 }
1430 
1431 // predicate controlling translation of StoreCM
1432 //
1433 // returns true if a StoreStore must precede the card write otherwise
1434 // false
1435 
1436 bool unnecessary_storestore(const Node *storecm)
1437 {
1438   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1439 
1440   // we need to generate a dmb ishst between an object put and the
1441   // associated card mark when we are using CMS without conditional
1442   // card marking
1443 
1444   if (UseConcMarkSweepGC && !UseCondCardMark) {
1445     return false;
1446   }
1447 
1448   // a storestore is unnecesary in all other cases
1449 
1450   return true;
1451 }
1452 
1453 
1454 #define __ _masm.
1455 
1456 // advance declarations for helper functions to convert register
1457 // indices to register objects
1458 
1459 // the ad file has to provide implementations of certain methods
1460 // expected by the generic code
1461 //
1462 // REQUIRED FUNCTIONALITY
1463 
1464 //=============================================================================
1465 
1466 // !!!!! Special hack to get all types of calls to specify the byte offset
1467 //       from the start of the call to the point where the return address
1468 //       will point.
1469 
1470 int MachCallStaticJavaNode::ret_addr_offset()
1471 {
1472   // call should be a simple bl
1473   int off = 4;
1474   return off;
1475 }
1476 
1477 int MachCallDynamicJavaNode::ret_addr_offset()
1478 {
1479   return 16; // movz, movk, movk, bl
1480 }
1481 
1482 int MachCallRuntimeNode::ret_addr_offset() {
1483   // for generated stubs the call will be
1484   //   far_call(addr)
1485   // for real runtime callouts it will be six instructions
1486   // see aarch64_enc_java_to_runtime
1487   //   adr(rscratch2, retaddr)
1488   //   lea(rscratch1, RuntimeAddress(addr)
1489   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1490   //   blrt rscratch1
1491   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1492   if (cb) {
1493     return MacroAssembler::far_branch_size();
1494   } else {
1495     return 6 * NativeInstruction::instruction_size;
1496   }
1497 }
1498 
1499 // Indicate if the safepoint node needs the polling page as an input
1500 
1501 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1503 // instruction itself. so we cannot plant a mov of the safepoint poll
1504 // address followed by a load. setting this to true means the mov is
1505 // scheduled as a prior instruction. that's better for scheduling
1506 // anyway.
1507 
1508 bool SafePointNode::needs_polling_address_input()
1509 {
1510   return true;
1511 }
1512 
1513 //=============================================================================
1514 
1515 #ifndef PRODUCT
// Debug-only pretty printer for the breakpoint pseudo-instruction.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
1519 #endif
1520 
// Emit a brk #0 trap instruction for the breakpoint node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
1525 
// Size is measured from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1529 
1530 //=============================================================================
1531 
1532 #ifndef PRODUCT
  // Debug-only pretty printer for nop padding.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1536 #endif
1537 
1538   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1539     MacroAssembler _masm(&cbuf);
1540     for (int i = 0; i < _count; i++) {
1541       __ nop();
1542     }
1543   }
1544 
  // _count nops of instruction_size bytes each.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1548 
1549 //=============================================================================
1550 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1551 
// With absolute addressing the constant table needs no base offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
1555 
1556 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never reached: requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
1560 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding (the constant table uses absolute addressing, so
  // no base needs to be materialised)
}
1564 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // matches the empty encoding in emit() above
  return 0;
}
1568 
1569 #ifndef PRODUCT
// Debug-only: show that this node emits nothing.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1573 #endif
1574 
1575 #ifndef PRODUCT
// Debug-only pretty printer for the prolog; mirrors the frame set-up
// performed by MachPrologNode::emit (via build_frame) below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frame: single immediate sub, then save fp/lr at the top of
  // the new frame
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push fp/lr first, then drop sp by the remainder
    // via rscratch1 (the immediate is out of range for sub)
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
1595 #endif
1596 
// Emit the method prolog: invalidation nop, optional stack bang,
// frame construction, simulator notification, and constant table
// base-offset setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // tell the simulator we have entered a compiled method
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1632 
1633 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
1634 {
1635   return MachNode::size(ra_); // too many variables; just compute it
1636                               // the hard way
1637 }
1638 
int MachPrologNode::reloc() const
{
  return 0; // no relocatable values attributed to the prolog
}
1643 
1644 //=============================================================================
1645 
1646 #ifndef PRODUCT
// Debug-only pretty printer for the epilog; mirrors the frame
// teardown done by MachEpilogNode::emit (remove_frame plus the
// optional return-poll).
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // three cases: fp/lr only, small frame popped with an immediate
  // add, large frame popped via rscratch1
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
1670 #endif
1671 
// Emit the method epilog: pop the frame, notify the simulator, check
// the reserved stack area if enabled, and poll for a safepoint on
// return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // tell the simulator we are re-entering the caller
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // safepoint poll on method return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1691 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size (polling and reserved-stack checks are
  // conditional). Determine dynamically.
  return MachNode::size(ra_);
}
1696 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
1701 
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1705 
// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  // NOTE(review): presumably the offset of the poll within the
  // epilog -- unverified, since the method appears unused
  return 4;
}
1713 
1714 //=============================================================================
1715 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register index to its spill class. Slot layout:
// 60 int slots, then 128 float slots, then flags, then stack slots.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots each (V<n>, V<n>_H, V<n>_J,
  // V<n>_K -- see vectorx_reg above), i.e. 128 slots after the 60
  // int slots (n.b. the previous comment here said "2 halves")
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1743 
1744 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1745   Compile* C = ra_->C;
1746 
1747   // Get registers to move.
1748   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1749   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1750   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1751   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1752 
1753   enum RC src_hi_rc = rc_class(src_hi);
1754   enum RC src_lo_rc = rc_class(src_lo);
1755   enum RC dst_hi_rc = rc_class(dst_hi);
1756   enum RC dst_lo_rc = rc_class(dst_lo);
1757 
1758   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1759 
1760   if (src_hi != OptoReg::Bad) {
1761     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1762            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1763            "expected aligned-adjacent pairs");
1764   }
1765 
1766   if (src_lo == dst_lo && src_hi == dst_hi) {
1767     return 0;            // Self copy, no move.
1768   }
1769 
1770   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1771               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1772   int src_offset = ra_->reg2offset(src_lo);
1773   int dst_offset = ra_->reg2offset(dst_lo);
1774 
1775   if (bottom_type()->isa_vect() != NULL) {
1776     uint ireg = ideal_reg();
1777     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1778     if (cbuf) {
1779       MacroAssembler _masm(cbuf);
1780       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1781       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1782         // stack->stack
1783         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1784         if (ireg == Op_VecD) {
1785           __ unspill(rscratch1, true, src_offset);
1786           __ spill(rscratch1, true, dst_offset);
1787         } else {
1788           __ spill_copy128(src_offset, dst_offset);
1789         }
1790       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1791         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1792                ireg == Op_VecD ? __ T8B : __ T16B,
1793                as_FloatRegister(Matcher::_regEncode[src_lo]));
1794       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1795         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1796                        ireg == Op_VecD ? __ D : __ Q,
1797                        ra_->reg2offset(dst_lo));
1798       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1799         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1800                        ireg == Op_VecD ? __ D : __ Q,
1801                        ra_->reg2offset(src_lo));
1802       } else {
1803         ShouldNotReachHere();
1804       }
1805     }
1806   } else if (cbuf) {
1807     MacroAssembler _masm(cbuf);
1808     switch (src_lo_rc) {
1809     case rc_int:
1810       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1811         if (is64) {
1812             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1813                    as_Register(Matcher::_regEncode[src_lo]));
1814         } else {
1815             MacroAssembler _masm(cbuf);
1816             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1817                     as_Register(Matcher::_regEncode[src_lo]));
1818         }
1819       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1820         if (is64) {
1821             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1822                      as_Register(Matcher::_regEncode[src_lo]));
1823         } else {
1824             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1825                      as_Register(Matcher::_regEncode[src_lo]));
1826         }
1827       } else {                    // gpr --> stack spill
1828         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1829         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1830       }
1831       break;
1832     case rc_float:
1833       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1834         if (is64) {
1835             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1836                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1837         } else {
1838             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1839                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1840         }
1841       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1842           if (cbuf) {
1843             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1844                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1845         } else {
1846             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1847                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1848         }
1849       } else {                    // fpr --> stack spill
1850         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1851         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1852                  is64 ? __ D : __ S, dst_offset);
1853       }
1854       break;
1855     case rc_stack:
1856       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1857         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1858       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1859         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1860                    is64 ? __ D : __ S, src_offset);
1861       } else {                    // stack --> stack copy
1862         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1863         __ unspill(rscratch1, is64, src_offset);
1864         __ spill(rscratch1, is64, dst_offset);
1865       }
1866       break;
1867     default:
1868       assert(false, "bad rc_class for spill");
1869       ShouldNotReachHere();
1870     }
1871   }
1872 
1873   if (st) {
1874     st->print("spill ");
1875     if (src_lo_rc == rc_stack) {
1876       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1877     } else {
1878       st->print("%s -> ", Matcher::regName[src_lo]);
1879     }
1880     if (dst_lo_rc == rc_stack) {
1881       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1882     } else {
1883       st->print("%s", Matcher::regName[dst_lo]);
1884     }
1885     if (bottom_type()->isa_vect() != NULL) {
1886       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1887     } else {
1888       st->print("\t# spill size = %d", is64 ? 64:32);
1889     }
1890   }
1891 
1892   return 0;
1893 
1894 }
1895 
1896 #ifndef PRODUCT
1897 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1898   if (!ra_)
1899     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1900   else
1901     implementation(NULL, ra_, false, st);
1902 }
1903 #endif
1904 
// Emit the spill-copy code into cbuf; all the work is done by implementation().
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

// Size of the emitted code; defer to the generic MachNode computation.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1912 
1913 //=============================================================================
1914 
1915 #ifndef PRODUCT
1916 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1917   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1918   int reg = ra_->get_reg_first(this);
1919   st->print("add %s, rsp, #%d]\t# box lock",
1920             Matcher::regName[reg], offset);
1921 }
1922 #endif
1923 
1924 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1925   MacroAssembler _masm(&cbuf);
1926 
1927   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1928   int reg    = ra_->get_encode(this);
1929 
1930   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
1931     __ add(as_Register(reg), sp, offset);
1932   } else {
1933     ShouldNotReachHere();
1934   }
1935 }
1936 
// Code size of the emitted box-lock sequence: a single 4-byte add instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
1941 
1942 //=============================================================================
1943 
1944 #ifndef PRODUCT
1945 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1946 {
1947   st->print_cr("# MachUEPNode");
1948   if (UseCompressedClassPointers) {
1949     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1950     if (Universe::narrow_klass_shift() != 0) {
1951       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
1952     }
1953   } else {
1954    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1955   }
1956   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
1957   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
1958 }
1959 #endif
1960 
1961 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1962 {
1963   // This is the unverified entry point.
1964   MacroAssembler _masm(&cbuf);
1965 
1966   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
1967   Label skip;
1968   // TODO
1969   // can we avoid this skip and still use a reloc?
1970   __ br(Assembler::EQ, skip);
1971   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1972   __ bind(skip);
1973 }
1974 
// Size of the unverified entry point code; defer to the generic computation.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1979 
1980 // REQUIRED EMIT CODE
1981 
1982 //=============================================================================
1983 
1984 // Emit exception handler code.
1985 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
1986 {
1987   // mov rscratch1 #exception_blob_entry_point
1988   // br rscratch1
1989   // Note that the code buffer's insts_mark is always relative to insts.
1990   // That's why we must use the macroassembler to generate a handler.
1991   MacroAssembler _masm(&cbuf);
1992   address base = __ start_a_stub(size_exception_handler());
1993   if (base == NULL) {
1994     ciEnv::current()->record_failure("CodeCache is full");
1995     return 0;  // CodeBuffer::expand failed
1996   }
1997   int offset = __ offset();
1998   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
1999   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
2000   __ end_a_stub();
2001   return offset;
2002 }
2003 
2004 // Emit deopt handler code.
2005 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
2006 {
2007   // Note that the code buffer's insts_mark is always relative to insts.
2008   // That's why we must use the macroassembler to generate a handler.
2009   MacroAssembler _masm(&cbuf);
2010   address base = __ start_a_stub(size_deopt_handler());
2011   if (base == NULL) {
2012     ciEnv::current()->record_failure("CodeCache is full");
2013     return 0;  // CodeBuffer::expand failed
2014   }
2015   int offset = __ offset();
2016 
2017   __ adr(lr, __ pc());
2018   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
2019 
2020   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
2021   __ end_a_stub();
2022   return offset;
2023 }
2024 
2025 // REQUIRED MATCHER CODE
2026 
2027 //=============================================================================
2028 
2029 const bool Matcher::match_rule_supported(int opcode) {
2030 
2031   switch (opcode) {
2032   default:
2033     break;
2034   }
2035 
2036   if (!has_match_rule(opcode)) {
2037     return false;
2038   }
2039 
2040   return true;  // Per default match rules are supported.
2041 }
2042 
2043 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2044 
2045   // TODO
2046   // identify extra cases that we might want to provide match rules for
2047   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2048   bool ret_value = match_rule_supported(opcode);
2049   // Add rules here.
2050 
2051   return ret_value;  // Per default match rules are supported.
2052 }
2053 
// No predicated (masked) vector support on this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the platform-independent register pressure threshold for floats.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on AArch64; there is no x87-style FPU stack.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2067 
2068 // Is this branch offset short enough that a short branch can be used?
2069 //
2070 // NOTE: If the platform does not provide any short branch variants, then
2071 //       this method should return false for offset 0.
2072 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2073   // The passed offset is relative to address of the branch.
2074 
2075   return (-32768 <= offset && offset < 32768);
2076 }
2077 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2088 
2089 // Vector width in bytes.
2090 const int Matcher::vector_width_in_bytes(BasicType bt) {
2091   int size = MIN2(16,(int)MaxVectorSize);
2092   // Minimum 2 values in vector
2093   if (size < 2*type2aelembytes(bt)) size = 0;
2094   // But never < 4
2095   if (size < 4) size = 0;
2096   return size;
2097 }
2098 
2099 // Limits on vector size (number of elements) loaded into vector.
2100 const int Matcher::max_vector_size(const BasicType bt) {
2101   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2102 }
2103 const int Matcher::min_vector_size(const BasicType bt) {
2104 //  For the moment limit the vector size to 8 bytes
2105     int size = 8 / type2aelembytes(bt);
2106     if (size < 2) size = 2;
2107     return size;
2108 }
2109 
2110 // Vector ideal reg.
2111 const uint Matcher::vector_ideal_reg(int len) {
2112   switch(len) {
2113     case  8: return Op_VecD;
2114     case 16: return Op_VecX;
2115   }
2116   ShouldNotReachHere();
2117   return 0;
2118 }
2119 
// Shift counts for vector shifts always live in a full 128-bit register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// AArch64 supports misaligned vector store/load (comment inherited from
// the x86 port; behavior here is governed by the AlignVector flag).
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2133 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
2154 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing only works when decoding is a plain zero-extend
  // (i.e. no shift is applied to the narrow oop).
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2184 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not used on AArch64 (the "No-op on amd64" comment was inherited from
// the x86 port; here the hook is simply unimplemented).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2216 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // r0..r7 and v0..v7 (both halves of each) are the AArch64 Java
  // argument registers.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java argument register may also be used for spilling.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2247 
// AArch64 has a hardware divide, so no assembler stub is needed for
// long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.  There is no fused
// div/mod instruction on AArch64, so these projections are never used.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across a method-handle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2278 
2279 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2280   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2281     Node* u = addp->fast_out(i);
2282     if (u->is_Mem()) {
2283       int opsize = u->as_Mem()->memory_size();
2284       assert(opsize > 0, "unexpected memory operand size");
2285       if (u->as_Mem()->memory_size() != (1<<shift)) {
2286         return false;
2287       }
2288     }
2289   }
2290   return true;
2291 }
2292 
const bool Matcher::convi2l_type_required = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Returns true when the AddP and (parts of) its offset expression have
// been flagged in address_visited and their inputs pushed on mstack so
// that they will be matched as part of a memory operand rather than
// computed into a register.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // First try the simple base+offset form handled by shared code.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL (ConvI2L x) con) or (LShiftL y con) --
  // fold the shift (and possibly the i2l conversion) into the address.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) -- fold the conversion so the
  // load/store can use a sign-extended index register.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2335 
// Intentionally empty: AArch64 performs no post-matching reshaping of
// address expressions.
void Compile::reshape_address(AddPNode* addp) {
}
2338 
2339 // helper for encoding java_to_runtime calls on sim
2340 //
2341 // this is needed to compute the extra arguments required when
2342 // planting a call to the simulator blrt instruction. the TypeFunc
2343 // can be queried to identify the counts for integral, and floating
2344 // arguments and the return type
2345 
// Count the integral (gpcnt) and floating-point (fpcnt) arguments of
// signature tf and classify its return type into rtype, for planting a
// simulator blrt call.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): there is no break here, so a float/double argument
      // falls through and also increments the integral count below.
      // Confirm this double-counting is intended by the simulator blrt
      // calling convention rather than a missing 'break'.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type; the default (integral) case is listed in
  // the middle of the switch, before the float/double cases.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2380 
// Emit a volatile (acquire/release) memory access via instruction INSN.
// Volatile accesses only support a plain base-register address, so any
// index/displacement/scale is rejected at code-generation time.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore() helpers below:
// scalar GPR, scalar FPR, and SIMD/vector access forms.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2394 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Emits the GPR load/store insn for the operand described by
  // (base, index, size, disp); opcode selects sxtw vs lsl index scaling.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index came from an int: sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: plain base+displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Indexed addressing never carries a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2425 
  // FPR variant of loadStore(): same addressing-mode selection, but only
  // the scaled-I2L opcodes require a sign-extended index here.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // Index came from an int: sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      // No index register: plain base+displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Indexed addressing never carries a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2448 
2449   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2450                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2451                          int opcode, Register base, int index, int size, int disp)
2452   {
2453     if (index == -1) {
2454       (masm.*insn)(reg, T, Address(base, disp));
2455     } else {
2456       assert(disp == 0, "unsupported address mode");
2457       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2458     }
2459   }
2460 
2461 %}
2462 
2463 
2464 
2465 //----------ENCODING BLOCK-----------------------------------------------------
2466 // This block specifies the encoding classes used by the compiler to
2467 // output byte streams.  Encoding classes are parameterized macros
2468 // used by Machine Instruction Nodes in order to generate the bit
2469 // encoding of the instruction.  Operands specify their base encoding
2470 // interface with the interface keyword.  There are currently
2471 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2472 // COND_INTER.  REG_INTER causes an operand to generate a function
2473 // which returns its register number when queried.  CONST_INTER causes
2474 // an operand to generate a function which returns the value of the
2475 // constant when queried.  MEMORY_INTER causes an operand to generate
2476 // four functions which return the Base Register, the Index Register,
2477 // the Scale Value, and the Offset Value of the operand when queried.
2478 // COND_INTER causes an operand to generate six functions which return
2479 // the encoding code (ie - encoding bits for the instruction)
2480 // associated with each basic boolean condition for a conditional
2481 // instruction.
2482 //
2483 // Instructions specify two basic values for encoding.  Again, a
2484 // function is available to check if the constant displacement is an
2485 // oop. They use the ins_encode keyword to specify their encoding
2486 // classes (which must be a sequence of enc_class names, and their
2487 // parameters, specified in the encoding block), and they use the
2488 // opcode keyword to specify, in order, their primary, secondary, and
2489 // tertiary opcode.  Only the opcode sections which a particular
2490 // instruction needs for encoding need to be specified.
2491 encode %{
2492   // Build emit functions for each basic byte or larger field in the
2493   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2494   // from C++ code in the enc_class source block.  Emit functions will
2495   // live in the main source block for now.  In future, we can
2496   // generalize this by adding a syntax that specifies the sizes of
2497   // fields in an order, so that the adlc can build the emit functions
2498   // automagically
2499 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}

  // BEGIN Non-volatile memory access

  // Load sign-extended byte into a 32-bit register.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load sign-extended byte into a 64-bit register.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2519 
  // Load zero-extended byte (int destination).
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load zero-extended byte (long destination).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load sign-extended halfword into a 32-bit register.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load sign-extended halfword into a 64-bit register.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load zero-extended halfword (int destination).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load zero-extended halfword (long destination).
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2555 
  // Load 32-bit word (int destination).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load zero-extended 32-bit word (long destination).
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load sign-extended 32-bit word (long destination).
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit doubleword.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit float into an FP register.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit double into an FP register.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2591 
2592   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
2593     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2594     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
2595        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2596   %}
2597 
2598   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
2599     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2600     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
2601        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2602   %}
2603 
2604   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
2605     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2606     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
2607        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2608   %}
2609 
  // Non-volatile store encodings.  The *0 variants store the zero register
  // (zr) directly, avoiding the need for a source operand when storing 0.

  // Store low byte of an int register.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte.
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte preceded by a StoreStore barrier (ordering variant).
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store halfword.
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero halfword.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit word.
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero word.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64-bit doubleword.  Special case: r31 encodes sp, which cannot be
  // used as a store source here, so sp is first copied through rscratch2.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero doubleword.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store single-precision float.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store double-precision float.
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S/D/Q select the 32/64/128-bit SIMD access size.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2702 
2703   // END Non-volatile memory access
2704 
2705   // volatile loads and stores
2706 
  // Volatile access encodings.  MOV_VOLATILE is a macro defined earlier in
  // this file; as used here it materializes the address into the given
  // scratch register when needed, emits the requested acquire/release
  // instruction, and introduces a local _masm that the following '__'
  // statements rely on.  NOTE(review): confirm against the macro definition.

  // Store-release byte / halfword / word of an int register.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Load-acquire byte then sign-extend to 32 bits (ldarb zero-extends, so
  // the signed variants need an explicit sxtbw/sxtb afterwards).
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (int and long destinations).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (int and long destinations).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire 32-bit word (int and long destinations).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP volatile loads: there is no FP load-acquire form, so load into the
  // integer scratch register and fmov the bits across.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2797 
2798   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2799     Register src_reg = as_Register($src$$reg);
2800     // we sometimes get asked to store the stack pointer into the
2801     // current thread -- we cannot do that directly on AArch64
2802     if (src_reg == r31_sp) {
2803         MacroAssembler _masm(&cbuf);
2804       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2805       __ mov(rscratch2, sp);
2806       src_reg = rscratch2;
2807     }
2808     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2809                  rscratch1, stlr);
2810   %}
2811 
  // FP volatile stores: no FP store-release form exists, so the bits are
  // moved to rscratch2 and stored with stlrw/stlr.  The inner braces end
  // the temporary _masm's scope before MOV_VOLATILE introduces its own.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2831 
2832   // synchronized read/update encodings
2833 
2834   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2835     MacroAssembler _masm(&cbuf);
2836     Register dst_reg = as_Register($dst$$reg);
2837     Register base = as_Register($mem$$base);
2838     int index = $mem$$index;
2839     int scale = $mem$$scale;
2840     int disp = $mem$$disp;
2841     if (index == -1) {
2842        if (disp != 0) {
2843         __ lea(rscratch1, Address(base, disp));
2844         __ ldaxr(dst_reg, rscratch1);
2845       } else {
2846         // TODO
2847         // should we ever get anything other than this case?
2848         __ ldaxr(dst_reg, base);
2849       }
2850     } else {
2851       Register index_reg = as_Register(index);
2852       if (disp == 0) {
2853         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2854         __ ldaxr(dst_reg, rscratch1);
2855       } else {
2856         __ lea(rscratch1, Address(base, disp));
2857         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2858         __ ldaxr(dst_reg, rscratch1);
2859       }
2860     }
2861   %}
2862 
2863   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2864     MacroAssembler _masm(&cbuf);
2865     Register src_reg = as_Register($src$$reg);
2866     Register base = as_Register($mem$$base);
2867     int index = $mem$$index;
2868     int scale = $mem$$scale;
2869     int disp = $mem$$disp;
2870     if (index == -1) {
2871        if (disp != 0) {
2872         __ lea(rscratch2, Address(base, disp));
2873         __ stlxr(rscratch1, src_reg, rscratch2);
2874       } else {
2875         // TODO
2876         // should we ever get anything other than this case?
2877         __ stlxr(rscratch1, src_reg, base);
2878       }
2879     } else {
2880       Register index_reg = as_Register(index);
2881       if (disp == 0) {
2882         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
2883         __ stlxr(rscratch1, src_reg, rscratch2);
2884       } else {
2885         __ lea(rscratch2, Address(base, disp));
2886         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
2887         __ stlxr(rscratch1, src_reg, rscratch2);
2888       }
2889     }
2890     __ cmpw(rscratch1, zr);
2891   %}
2892 
  // Compare-and-exchange encodings.  All of these require a plain
  // base-register address (no index, no displacement) -- the guarantee
  // enforces that.  MacroAssembler::cmpxchg takes the operand size and
  // acquire/release/weak flags; the non-acq forms use release-only
  // ordering on the update.

  // 64-bit CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2945 
2946 
  // auxiliary used for CompareAndSwapX to set result register
  // Sets res to 1 if the preceding comparison set EQ, else 0 (cset).
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}

  // prefetch encodings

  // Prefetch for store (PSTL1KEEP hint).  prfm cannot combine index and
  // displacement, so that combination is split via lea into rscratch1.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
2974 
  /// mov encodings
2976 
  // Load a 32-bit immediate into an int register; zero gets the cheaper
  // move-from-zr form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate into a long register; zero special-cased.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Load a pointer constant, dispatching on its relocation type: oops and
  // metadata get relocatable moves; plain addresses below the VM page size
  // use a simple mov, anything else uses adrp+add page addressing.
  // NULL and 1 are handled by dedicated enc_classes below, never here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // Pointer constant NULL.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Address of the safepoint polling page, via page-aligned adrp with a
  // poll_type relocation; the page is assumed page-aligned (offset 0).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Base of the GC card table byte map.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow oop NULL.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant; must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3081 
3082   // arithmetic encodings
3083 
  // Add/subtract immediate, 32-bit.  A single enc_class serves both ops:
  // $primary (set by the instruct that uses this encoding) selects
  // subtract by negating the constant, and a negative effective constant
  // is emitted as the opposite operation on its absolute value so the
  // immediate always fits the add/sub immediate encoding.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Add/subtract immediate, 64-bit; same $primary convention as above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3111 
3112   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3113     MacroAssembler _masm(&cbuf);
3114    Register dst_reg = as_Register($dst$$reg);
3115    Register src1_reg = as_Register($src1$$reg);
3116    Register src2_reg = as_Register($src2$$reg);
3117     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3118   %}
3119 
3120   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3121     MacroAssembler _masm(&cbuf);
3122    Register dst_reg = as_Register($dst$$reg);
3123    Register src1_reg = as_Register($src1$$reg);
3124    Register src2_reg = as_Register($src2$$reg);
3125     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3126   %}
3127 
3128   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3129     MacroAssembler _masm(&cbuf);
3130    Register dst_reg = as_Register($dst$$reg);
3131    Register src1_reg = as_Register($src1$$reg);
3132    Register src2_reg = as_Register($src2$$reg);
3133     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3134   %}
3135 
3136   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3137     MacroAssembler _masm(&cbuf);
3138    Register dst_reg = as_Register($dst$$reg);
3139    Register src1_reg = as_Register($src1$$reg);
3140    Register src2_reg = as_Register($src2$$reg);
3141     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3142   %}
3143 
3144   // compare instruction encodings
3145 
  // Compare two int registers.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Compare int register with an add/sub-encodable immediate: emitted as
  // subsw/addsw into zr (flags only), flipping the op for negative values
  // so the immediate stays encodable.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // Compare int register with an arbitrary 32-bit immediate: materialize
  // the constant in rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // Compare two long registers.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Compare long register with a 12-bit immediate.  val != -val is false
  // only for 0 and Long.MIN_VALUE; 0 takes the first branch, so the final
  // arm handles MIN_VALUE (whose negation overflows) via a register.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // Compare long register with an arbitrary 64-bit immediate.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Compare two pointer registers.
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Compare two narrow-oop registers (32-bit compare).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Test a pointer register against NULL (compare with zr).
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Test a narrow-oop register against NULL.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3227 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-comparison conditional branch (same emission; the cmpOpU
  // operand supplies unsigned condition codes).
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Slow-path subtype check via check_klass_subtype_slow_path.  When
  // $primary is set, result is cleared to zero on the hit path; the miss
  // label is bound at the end so both paths fall out of the encoding.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3263 
  // Static Java call.  A null _method means a call into a runtime wrapper;
  // otherwise a trampoline call with an opt-virtual or static-call
  // relocation is emitted plus a to-interpreter stub.  A null return from
  // either emission means the code cache is full, which fails the compile.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Dynamic (inline-cache) Java call; bails out if the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call epilog; stack-depth verification is unimplemented on AArch64.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3308 
  // Call from compiled Java code into the runtime.  Targets inside the code
  // cache use a trampoline call; everything else goes through blrt with the
  // absolute address in rscratch1 and the call signature from getCallInfo.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // pop the breadcrumb pushed above
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3339 
  // Jump to the rethrow stub (exception oop already set up by caller).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Plain return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: jump to the target with the current frame left in place.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump for exception forwarding: the callee expects the original
  // return address in r3.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3365 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Inline fast-path monitor enter.  On exit the condition flags carry
    // the result: EQ means the lock was acquired, NE means the caller must
    // fall back to the slow path (see the flag comments at label cont).
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      // biased_locking_enter is given the cont label and may branch
      // there directly.
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    // we can use AArch64's bit test and branch here but
    // markoopDesc does not define a bit index just the bit value
    // so assert in case the bit pos changes
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single compare-and-swap with acquire/release semantics;
      // the explicit cmp that follows establishes the EQ/NE result flags.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Pre-LSE path: load-exclusive/store-exclusive retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; execution continues at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the masked difference is zero the mark points into our own stack,
    // i.e. we already own the lock, and we can store 0 as the displaced
    // header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      // CAS NULL -> rthread on the owner field; flags come from the cmp.
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      __ cmp(rscratch1, disp_hdr);
    } else {
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3511 
3512   // TODO
3513   // reimplement this with custom cmpxchgptr code
3514   // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Inline fast-path monitor exit.  On exit the condition flags carry
    // the result: EQ means the lock was released, NE means the caller must
    // fall back to the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE path: CAS the mark word back from box to the displaced
        // header; the cmp establishes the EQ/NE result flags.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // Pre-LSE path: load-exclusive/store-exclusive retry loop.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // Inflated lock: recover the ObjectMonitor* by stripping the tag bits.
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    // We own the monitor and there are no recursions: it can only be
    // released if both EntryList and cxq are empty.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr);
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3600 
3601 %}
3602 
3603 //----------FRAME--------------------------------------------------------------
3604 // Definition of frame structure and management information.
3605 //
3606 //  S T A C K   L A Y O U T    Allocators stack-slot number
3607 //                             |   (to get allocators register number
3608 //  G  Owned by    |        |  v    add OptoReg::stack0())
3609 //  r   CALLER     |        |
3610 //  o     |        +--------+      pad to even-align allocators stack-slot
3611 //  w     V        |  pad0  |        numbers; owned by CALLER
3612 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3613 //  h     ^        |   in   |  5
3614 //        |        |  args  |  4   Holes in incoming args owned by SELF
3615 //  |     |        |        |  3
3616 //  |     |        +--------+
3617 //  V     |        | old out|      Empty on Intel, window on Sparc
3618 //        |    old |preserve|      Must be even aligned.
3619 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3620 //        |        |   in   |  3   area for Intel ret address
3621 //     Owned by    |preserve|      Empty on Sparc.
3622 //       SELF      +--------+
3623 //        |        |  pad2  |  2   pad to align old SP
3624 //        |        +--------+  1
3625 //        |        | locks  |  0
3626 //        |        +--------+----> OptoReg::stack0(), even aligned
3627 //        |        |  pad1  | 11   pad to align new SP
3628 //        |        +--------+
3629 //        |        |        | 10
3630 //        |        | spills |  9   spills
3631 //        V        |        |  8   (pad0 slot for callee)
3632 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3633 //        ^        |  out   |  7
3634 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3635 //     Owned by    +--------+
3636 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3637 //        |    new |preserve|      Must be even-aligned.
3638 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3639 //        |        |        |
3640 //
3641 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3642 //         known from SELF's arguments and the Java calling convention.
3643 //         Region 6-7 is determined per call site.
3644 // Note 2: If the calling convention leaves holes in the incoming argument
3645 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3647 //         incoming area, as the Java calling convention is completely under
3648 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3650 //         varargs C calling conventions.
3651 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3652 //         even aligned with pad0 as needed.
3653 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3654 //           (the latter is true on Intel but is it false on AArch64?)
3655 //         region 6-11 is even aligned; it may be padded out more so that
3656 //         the region from SP to FP meets the minimum stack alignment.
3657 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3658 //         alignment.  Region 11, pad1, may be dynamically extended so that
3659 //         SP meets the minimum alignment.
3660 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 here names this file's SP encoding -- confirm
  // against the register definition block.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo[] and hi[] give the first and second OptoReg halves of the
    // return value, indexed by ideal register opcode; OptoReg::Bad in
    // hi[] marks a value that occupies a single slot.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3764 
3765 //----------ATTRIBUTES---------------------------------------------------------
3766 //----------Operand Attributes-------------------------------------------------
3767 op_attrib op_cost(1);        // Required cost attribute
3768 
3769 //----------Instruction Attributes---------------------------------------------
3770 ins_attrib ins_cost(INSN_COST); // Required cost attribute
3771 ins_attrib ins_size(32);        // Required size attribute (in bits)
3772 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3773                                 // a non-matching short branch variant
3774                                 // of some long branch?
3775 ins_attrib ins_alignment(4);    // Required alignment attribute (must
3776                                 // be a power of 2) specifies the
3777                                 // alignment that some part of the
3778                                 // instruction (not necessarily the
3779                                 // start) requires.  If > 1, a
3780                                 // compute_padding() function must be
3781                                 // provided for the instruction
3782 
3783 //----------OPERANDS-----------------------------------------------------------
3784 // Operand definitions must precede instruction definitions for correct parsing
3785 // in the ADLC because operands constitute user defined types which are used in
3786 // instruction definitions.
3787 
3788 //----------Simple Operands----------------------------------------------------
3789 
3790 // Integer operands 32 bit
3791 // 32 bit immediate
3792 operand immI()
3793 %{
3794   match(ConI);
3795 
3796   op_cost(0);
3797   format %{ %}
3798   interface(CONST_INTER);
3799 %}
3800 
3801 // 32 bit zero
3802 operand immI0()
3803 %{
3804   predicate(n->get_int() == 0);
3805   match(ConI);
3806 
3807   op_cost(0);
3808   format %{ %}
3809   interface(CONST_INTER);
3810 %}
3811 
3812 // 32 bit unit increment
3813 operand immI_1()
3814 %{
3815   predicate(n->get_int() == 1);
3816   match(ConI);
3817 
3818   op_cost(0);
3819   format %{ %}
3820   interface(CONST_INTER);
3821 %}
3822 
3823 // 32 bit unit decrement
3824 operand immI_M1()
3825 %{
3826   predicate(n->get_int() == -1);
3827   match(ConI);
3828 
3829   op_cost(0);
3830   format %{ %}
3831   interface(CONST_INTER);
3832 %}
3833 
3834 // Shift values for add/sub extension shift
3835 operand immIExt()
3836 %{
3837   predicate(0 <= n->get_int() && (n->get_int() <= 4));
3838   match(ConI);
3839 
3840   op_cost(0);
3841   format %{ %}
3842   interface(CONST_INTER);
3843 %}
3844 
// 32 bit constant no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3964 
// 64 bit constant 255 (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of consecutive low-order one bits (value+1 is a power of
// two) whose top two bits are clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of consecutive low-order one bits (value+1 is a power of
// two) whose top two bits are clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4016 
4017 // Scale values for scaled offset addressing modes (up to long but not quad)
4018 operand immIScale()
4019 %{
4020   predicate(0 <= n->get_int() && (n->get_int() <= 3));
4021   match(ConI);
4022 
4023   op_cost(0);
4024   format %{ %}
4025   interface(CONST_INTER);
4026 %}
4027 
4028 // 26 bit signed offset -- for pc-relative branches
4029 operand immI26()
4030 %{
4031   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
4032   match(ConI);
4033 
4034   op_cost(0);
4035   format %{ %}
4036   interface(CONST_INTER);
4037 %}
4038 
4039 // 19 bit signed offset -- for pc-relative loads
4040 operand immI19()
4041 %{
4042   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
4043   match(ConI);
4044 
4045   op_cost(0);
4046   format %{ %}
4047   interface(CONST_INTER);
4048 %}
4049 
4050 // 12 bit unsigned offset -- for base plus immediate loads
4051 operand immIU12()
4052 %{
4053   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
4054   match(ConI);
4055 
4056   op_cost(0);
4057   format %{ %}
4058   interface(CONST_INTER);
4059 %}
4060 
// 12 bit unsigned offset -- for base plus immediate loads (long form)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4070 
4071 // Offset for scaled or unscaled immediate loads and stores
4072 operand immIOffset()
4073 %{
4074   predicate(Address::offset_ok_for_immed(n->get_int()));
4075   match(ConI);
4076 
4077   op_cost(0);
4078   format %{ %}
4079   interface(CONST_INTER);
4080 %}
4081 
// Offset for scaled or unscaled immediate 4 byte loads and stores
// (second predicate argument is the log2 access size)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate 8 byte loads and stores
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate 16 byte loads and stores
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long equivalent of immIOffset: offset for scaled or unscaled
// immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate 4 byte loads and stores
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate 8 byte loads and stores
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate 16 byte loads and stores
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4151 
4152 // 32 bit integer valid for add sub immediate
4153 operand immIAddSub()
4154 %{
4155   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
4156   match(ConI);
4157   op_cost(0);
4158   format %{ %}
4159   interface(CONST_INTER);
4160 %}
4161 
4162 // 32 bit unsigned integer valid for logical immediate
4163 // TODO -- check this is right when e.g the mask is 0x80000000
4164 operand immILog()
4165 %{
4166   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
4167   match(ConI);
4168 
4169   op_cost(0);
4170   format %{ %}
4171   interface(CONST_INTER);
4172 %}
4173 
4174 // Integer operands 64 bit
4175 // 64 bit immediate
4176 operand immL()
4177 %{
4178   match(ConL);
4179 
4180   op_cost(0);
4181   format %{ %}
4182   interface(CONST_INTER);
4183 %}
4184 
4185 // 64 bit zero
4186 operand immL0()
4187 %{
4188   predicate(n->get_long() == 0);
4189   match(ConL);
4190 
4191   op_cost(0);
4192   format %{ %}
4193   interface(CONST_INTER);
4194 %}
4195 
4196 // 64 bit unit increment
4197 operand immL_1()
4198 %{
4199   predicate(n->get_long() == 1);
4200   match(ConL);
4201 
4202   op_cost(0);
4203   format %{ %}
4204   interface(CONST_INTER);
4205 %}
4206 
4207 // 64 bit unit decrement
4208 operand immL_M1()
4209 %{
4210   predicate(n->get_long() == -1);
4211   match(ConL);
4212 
4213   op_cost(0);
4214   format %{ %}
4215   interface(CONST_INTER);
4216 %}
4217 
4218 // 32 bit offset of pc in thread anchor
4219 
4220 operand immL_pc_off()
4221 %{
4222   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
4223                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
4224   match(ConL);
4225 
4226   op_cost(0);
4227   format %{ %}
4228   interface(CONST_INTER);
4229 %}
4230 
4231 // 64 bit integer valid for add sub immediate
4232 operand immLAddSub()
4233 %{
4234   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
4235   match(ConL);
4236   op_cost(0);
4237   format %{ %}
4238   interface(CONST_INTER);
4239 %}
4240 
4241 // 64 bit integer valid for logical immediate
4242 operand immLLog()
4243 %{
4244   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
4245   match(ConL);
4246   op_cost(0);
4247   format %{ %}
4248   interface(CONST_INTER);
4249 %}
4250 
4251 // Long Immediate: low 32-bit mask
4252 operand immL_32bits()
4253 %{
4254   predicate(n->get_long() == 0xFFFFFFFFL);
4255   match(ConL);
4256   op_cost(0);
4257   format %{ %}
4258   interface(CONST_INTER);
4259 %}
4260 
4261 // Pointer operands
4262 // Pointer Immediate
4263 operand immP()
4264 %{
4265   match(ConP);
4266 
4267   op_cost(0);
4268   format %{ %}
4269   interface(CONST_INTER);
4270 %}
4271 
4272 // NULL Pointer Immediate
4273 operand immP0()
4274 %{
4275   predicate(n->get_ptr() == 0);
4276   match(ConP);
4277 
4278   op_cost(0);
4279   format %{ %}
4280   interface(CONST_INTER);
4281 %}
4282 
4283 // Pointer Immediate One
4284 // this is used in object initialization (initial object header)
4285 operand immP_1()
4286 %{
4287   predicate(n->get_ptr() == 1);
4288   match(ConP);
4289 
4290   op_cost(0);
4291   format %{ %}
4292   interface(CONST_INTER);
4293 %}
4294 
4295 // Polling Page Pointer Immediate
4296 operand immPollPage()
4297 %{
4298   predicate((address)n->get_ptr() == os::get_polling_page());
4299   match(ConP);
4300 
4301   op_cost(0);
4302   format %{ %}
4303   interface(CONST_INTER);
4304 %}
4305 
4306 // Card Table Byte Map Base
4307 operand immByteMapBase()
4308 %{
4309   // Get base of card map
4310   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
4311             (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
4312   match(ConP);
4313 
4314   op_cost(0);
4315   format %{ %}
4316   interface(CONST_INTER);
4317 %}
4318 
4319 // Pointer Immediate Minus One
4320 // this is used when we want to write the current PC to the thread anchor
4321 operand immP_M1()
4322 %{
4323   predicate(n->get_ptr() == -1);
4324   match(ConP);
4325 
4326   op_cost(0);
4327   format %{ %}
4328   interface(CONST_INTER);
4329 %}
4330 
4331 // Pointer Immediate Minus Two
4332 // this is used when we want to write the current PC to the thread anchor
4333 operand immP_M2()
4334 %{
4335   predicate(n->get_ptr() == -2);
4336   match(ConP);
4337 
4338   op_cost(0);
4339   format %{ %}
4340   interface(CONST_INTER);
4341 %}
4342 
4343 // Float and Double operands
4344 // Double Immediate
4345 operand immD()
4346 %{
4347   match(ConD);
4348   op_cost(0);
4349   format %{ %}
4350   interface(CONST_INTER);
4351 %}
4352 
4353 // Double Immediate: +0.0d
4354 operand immD0()
4355 %{
4356   predicate(jlong_cast(n->getd()) == 0);
4357   match(ConD);
4358 
4359   op_cost(0);
4360   format %{ %}
4361   interface(CONST_INTER);
4362 %}
4363 
// Double constant encodable directly as a floating-point immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4373 
4374 // Float Immediate
4375 operand immF()
4376 %{
4377   match(ConF);
4378   op_cost(0);
4379   format %{ %}
4380   interface(CONST_INTER);
4381 %}
4382 
4383 // Float Immediate: +0.0f.
4384 operand immF0()
4385 %{
4386   predicate(jint_cast(n->getf()) == 0);
4387   match(ConF);
4388 
4389   op_cost(0);
4390   format %{ %}
4391   interface(CONST_INTER);
4392 %}
4393 
4394 //
4395 operand immFPacked()
4396 %{
4397   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
4398   match(ConF);
4399   op_cost(0);
4400   format %{ %}
4401   interface(CONST_INTER);
4402 %}
4403 
// Narrow pointer operands
// Narrow Pointer Immediate: any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate: any compressed-klass constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4434 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4456 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
// Also accepts the narrower iRegLNoSp class.
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4468 
// Integer 64 bit Register not Special
// Long counterpart of iRegINoSp: any 64 bit register allowed by the
// no_special_reg mask; also accepts the fixed-register iRegL_R0 class.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  op_cost(0);   // declared explicitly for consistency with every sibling operand
  format %{ %}
  interface(REG_INTER);
%}
4478 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4511 
// Fixed-register pointer operands: each constrains allocation to a single
// named register so instruct rules can pin an argument or result to it.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4595 
// Fixed-register long operands, analogous to the iRegP_Rn classes above.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4639 
// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4650 
// Fixed-register 32 bit integer operands.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4695 
4696 
// Pointer Register Operands
// Narrow Pointer Register (compressed oop in a 32 bit register)
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// NOTE(review): the previous comment said "Integer 64 bit Register not
// Special" — a copy-paste; this is a 32 bit narrow (RegN) class.
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4745 
// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4756 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4800 
// Fixed-register double operands pinned to v0..v3.

operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4836 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same physical register, distinct operand so rules can require
// unsigned-compare producers)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4876 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (previous "link_reg" comment was a copy-paste)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link (return address) Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4918 
//----------Memory Operands----------------------------------------------------
// In MEMORY_INTER blocks below, index(0xffffffff) means "no index register".

// [reg] -- base only
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (int index sign-extended to 64 bits) << scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  // the scaled-index form must be legal for every memory use of this
  // address, otherwise fall back to simpler addressing
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (long index) << scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (int index sign-extended to 64 bits)]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + long index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
4992 
// [reg + immediate offset] forms. The 4/8/16 suffixed variants take the
// correspondingly restricted offset operands (immIOffset4 etc.) —
// presumably offsets valid for scaled 4/8/16-byte accesses; confirm
// against the immIOffset*/immLoffset* definitions.

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5104 
// Narrow-oop address forms: valid only when narrow_oop_shift() == 0 so
// the compressed value decoded by DecodeN can be used directly as the
// 64 bit base register.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5209 
5210 
5211 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread reg + pc-slot offset] address form for that store.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5226 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): base(0x1e) is the matcher's stack-pointer encoding; the
// "RSP" comments are x86 terminology carried over.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5301 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// The hex values are the A64 condition-code encodings for the named
// condition mnemonics.

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (uses the unsigned condition mnemonics lo/hs/ls/hi)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5357 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts the Bool test to eq/ne only)

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts the Bool test to lt/ge only)

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate allows eq/ne/lt/ge)

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5433 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I long) so a 32 bit instruction can consume the low
// half of the long register directly, eliding the explicit l2i move.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // terminated with ';' for consistency with every other operand definition
  interface(REG_INTER);
%}
5446 
// vector load/store memory forms: the 4/8/16 suffix selects the
// offset operands sized for the corresponding access width
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5478 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names onto the generic pipe_desc stages
// (S0..S5) declared further below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5488 
5489 // Integer ALU reg operation
5490 pipeline %{
5491 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions (comment was truncated: "TODO does")
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5504 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 means "either issue slot"; ALU means "either ALU".
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5525 
5526 //----------PIPELINE CLASSES---------------------------------------------------
5527 // Pipeline Classes describe the stages in which input and output are
5528 // referenced by the hardware pipeline.
5529 
5530 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
5531 %{
5532   single_instruction;
5533   src1   : S1(read);
5534   src2   : S2(read);
5535   dst    : S5(write);
5536   INS01  : ISS;
5537   NEON_FP : S5;
5538 %}
5539 
5540 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
5541 %{
5542   single_instruction;
5543   src1   : S1(read);
5544   src2   : S2(read);
5545   dst    : S5(write);
5546   INS01  : ISS;
5547   NEON_FP : S5;
5548 %}
5549 
5550 pipe_class fp_uop_s(vRegF dst, vRegF src)
5551 %{
5552   single_instruction;
5553   src    : S1(read);
5554   dst    : S5(write);
5555   INS01  : ISS;
5556   NEON_FP : S5;
5557 %}
5558 
5559 pipe_class fp_uop_d(vRegD dst, vRegD src)
5560 %{
5561   single_instruction;
5562   src    : S1(read);
5563   dst    : S5(write);
5564   INS01  : ISS;
5565   NEON_FP : S5;
5566 %}
5567 
5568 pipe_class fp_d2f(vRegF dst, vRegD src)
5569 %{
5570   single_instruction;
5571   src    : S1(read);
5572   dst    : S5(write);
5573   INS01  : ISS;
5574   NEON_FP : S5;
5575 %}
5576 
5577 pipe_class fp_f2d(vRegD dst, vRegF src)
5578 %{
5579   single_instruction;
5580   src    : S1(read);
5581   dst    : S5(write);
5582   INS01  : ISS;
5583   NEON_FP : S5;
5584 %}
5585 
5586 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
5587 %{
5588   single_instruction;
5589   src    : S1(read);
5590   dst    : S5(write);
5591   INS01  : ISS;
5592   NEON_FP : S5;
5593 %}
5594 
5595 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
5596 %{
5597   single_instruction;
5598   src    : S1(read);
5599   dst    : S5(write);
5600   INS01  : ISS;
5601   NEON_FP : S5;
5602 %}
5603 
5604 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
5605 %{
5606   single_instruction;
5607   src    : S1(read);
5608   dst    : S5(write);
5609   INS01  : ISS;
5610   NEON_FP : S5;
5611 %}
5612 
5613 pipe_class fp_l2f(vRegF dst, iRegL src)
5614 %{
5615   single_instruction;
5616   src    : S1(read);
5617   dst    : S5(write);
5618   INS01  : ISS;
5619   NEON_FP : S5;
5620 %}
5621 
5622 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
5623 %{
5624   single_instruction;
5625   src    : S1(read);
5626   dst    : S5(write);
5627   INS01  : ISS;
5628   NEON_FP : S5;
5629 %}
5630 
5631 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
5632 %{
5633   single_instruction;
5634   src    : S1(read);
5635   dst    : S5(write);
5636   INS01  : ISS;
5637   NEON_FP : S5;
5638 %}
5639 
5640 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
5641 %{
5642   single_instruction;
5643   src    : S1(read);
5644   dst    : S5(write);
5645   INS01  : ISS;
5646   NEON_FP : S5;
5647 %}
5648 
// Long-to-double conversion (e.g. SCVTF d0, x1).
// Source operand class fixed to iRegL: the source of an l2d is a full
// 64-bit integer register, matching fp_l2f above. (Was iRegIorL2I, the
// 32-bit-or-narrowed-long class used by the i2f/i2d variants -- copy-paste.)
pipe_class fp_l2d(vRegD dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5657 
5658 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
5659 %{
5660   single_instruction;
5661   src1   : S1(read);
5662   src2   : S2(read);
5663   dst    : S5(write);
5664   INS0   : ISS;
5665   NEON_FP : S5;
5666 %}
5667 
5668 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
5669 %{
5670   single_instruction;
5671   src1   : S1(read);
5672   src2   : S2(read);
5673   dst    : S5(write);
5674   INS0   : ISS;
5675   NEON_FP : S5;
5676 %}
5677 
5678 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
5679 %{
5680   single_instruction;
5681   cr     : S1(read);
5682   src1   : S1(read);
5683   src2   : S1(read);
5684   dst    : S3(write);
5685   INS01  : ISS;
5686   NEON_FP : S3;
5687 %}
5688 
5689 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
5690 %{
5691   single_instruction;
5692   cr     : S1(read);
5693   src1   : S1(read);
5694   src2   : S1(read);
5695   dst    : S3(write);
5696   INS01  : ISS;
5697   NEON_FP : S3;
5698 %}
5699 
5700 pipe_class fp_imm_s(vRegF dst)
5701 %{
5702   single_instruction;
5703   dst    : S3(write);
5704   INS01  : ISS;
5705   NEON_FP : S3;
5706 %}
5707 
5708 pipe_class fp_imm_d(vRegD dst)
5709 %{
5710   single_instruction;
5711   dst    : S3(write);
5712   INS01  : ISS;
5713   NEON_FP : S3;
5714 %}
5715 
5716 pipe_class fp_load_constant_s(vRegF dst)
5717 %{
5718   single_instruction;
5719   dst    : S4(write);
5720   INS01  : ISS;
5721   NEON_FP : S4;
5722 %}
5723 
5724 pipe_class fp_load_constant_d(vRegD dst)
5725 %{
5726   single_instruction;
5727   dst    : S4(write);
5728   INS01  : ISS;
5729   NEON_FP : S4;
5730 %}
5731 
5732 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
5733 %{
5734   single_instruction;
5735   dst    : S5(write);
5736   src1   : S1(read);
5737   src2   : S1(read);
5738   INS01  : ISS;
5739   NEON_FP : S5;
5740 %}
5741 
5742 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
5743 %{
5744   single_instruction;
5745   dst    : S5(write);
5746   src1   : S1(read);
5747   src2   : S1(read);
5748   INS0   : ISS;
5749   NEON_FP : S5;
5750 %}
5751 
5752 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
5753 %{
5754   single_instruction;
5755   dst    : S5(write);
5756   src1   : S1(read);
5757   src2   : S1(read);
5758   dst    : S1(read);
5759   INS01  : ISS;
5760   NEON_FP : S5;
5761 %}
5762 
5763 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
5764 %{
5765   single_instruction;
5766   dst    : S5(write);
5767   src1   : S1(read);
5768   src2   : S1(read);
5769   dst    : S1(read);
5770   INS0   : ISS;
5771   NEON_FP : S5;
5772 %}
5773 
5774 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
5775 %{
5776   single_instruction;
5777   dst    : S4(write);
5778   src1   : S2(read);
5779   src2   : S2(read);
5780   INS01  : ISS;
5781   NEON_FP : S4;
5782 %}
5783 
5784 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
5785 %{
5786   single_instruction;
5787   dst    : S4(write);
5788   src1   : S2(read);
5789   src2   : S2(read);
5790   INS0   : ISS;
5791   NEON_FP : S4;
5792 %}
5793 
5794 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
5795 %{
5796   single_instruction;
5797   dst    : S3(write);
5798   src1   : S2(read);
5799   src2   : S2(read);
5800   INS01  : ISS;
5801   NEON_FP : S3;
5802 %}
5803 
5804 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
5805 %{
5806   single_instruction;
5807   dst    : S3(write);
5808   src1   : S2(read);
5809   src2   : S2(read);
5810   INS0   : ISS;
5811   NEON_FP : S3;
5812 %}
5813 
5814 pipe_class vshift64(vecD dst, vecD src, vecX shift)
5815 %{
5816   single_instruction;
5817   dst    : S3(write);
5818   src    : S1(read);
5819   shift  : S1(read);
5820   INS01  : ISS;
5821   NEON_FP : S3;
5822 %}
5823 
5824 pipe_class vshift128(vecX dst, vecX src, vecX shift)
5825 %{
5826   single_instruction;
5827   dst    : S3(write);
5828   src    : S1(read);
5829   shift  : S1(read);
5830   INS0   : ISS;
5831   NEON_FP : S3;
5832 %}
5833 
5834 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
5835 %{
5836   single_instruction;
5837   dst    : S3(write);
5838   src    : S1(read);
5839   INS01  : ISS;
5840   NEON_FP : S3;
5841 %}
5842 
5843 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
5844 %{
5845   single_instruction;
5846   dst    : S3(write);
5847   src    : S1(read);
5848   INS0   : ISS;
5849   NEON_FP : S3;
5850 %}
5851 
5852 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
5853 %{
5854   single_instruction;
5855   dst    : S5(write);
5856   src1   : S1(read);
5857   src2   : S1(read);
5858   INS01  : ISS;
5859   NEON_FP : S5;
5860 %}
5861 
5862 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
5863 %{
5864   single_instruction;
5865   dst    : S5(write);
5866   src1   : S1(read);
5867   src2   : S1(read);
5868   INS0   : ISS;
5869   NEON_FP : S5;
5870 %}
5871 
5872 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
5873 %{
5874   single_instruction;
5875   dst    : S5(write);
5876   src1   : S1(read);
5877   src2   : S1(read);
5878   INS0   : ISS;
5879   NEON_FP : S5;
5880 %}
5881 
5882 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
5883 %{
5884   single_instruction;
5885   dst    : S5(write);
5886   src1   : S1(read);
5887   src2   : S1(read);
5888   INS0   : ISS;
5889   NEON_FP : S5;
5890 %}
5891 
5892 pipe_class vsqrt_fp128(vecX dst, vecX src)
5893 %{
5894   single_instruction;
5895   dst    : S5(write);
5896   src    : S1(read);
5897   INS0   : ISS;
5898   NEON_FP : S5;
5899 %}
5900 
5901 pipe_class vunop_fp64(vecD dst, vecD src)
5902 %{
5903   single_instruction;
5904   dst    : S5(write);
5905   src    : S1(read);
5906   INS01  : ISS;
5907   NEON_FP : S5;
5908 %}
5909 
5910 pipe_class vunop_fp128(vecX dst, vecX src)
5911 %{
5912   single_instruction;
5913   dst    : S5(write);
5914   src    : S1(read);
5915   INS0   : ISS;
5916   NEON_FP : S5;
5917 %}
5918 
5919 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
5920 %{
5921   single_instruction;
5922   dst    : S3(write);
5923   src    : S1(read);
5924   INS01  : ISS;
5925   NEON_FP : S3;
5926 %}
5927 
5928 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
5929 %{
5930   single_instruction;
5931   dst    : S3(write);
5932   src    : S1(read);
5933   INS01  : ISS;
5934   NEON_FP : S3;
5935 %}
5936 
5937 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
5938 %{
5939   single_instruction;
5940   dst    : S3(write);
5941   src    : S1(read);
5942   INS01  : ISS;
5943   NEON_FP : S3;
5944 %}
5945 
5946 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
5947 %{
5948   single_instruction;
5949   dst    : S3(write);
5950   src    : S1(read);
5951   INS01  : ISS;
5952   NEON_FP : S3;
5953 %}
5954 
5955 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
5956 %{
5957   single_instruction;
5958   dst    : S3(write);
5959   src    : S1(read);
5960   INS01  : ISS;
5961   NEON_FP : S3;
5962 %}
5963 
5964 pipe_class vmovi_reg_imm64(vecD dst)
5965 %{
5966   single_instruction;
5967   dst    : S3(write);
5968   INS01  : ISS;
5969   NEON_FP : S3;
5970 %}
5971 
5972 pipe_class vmovi_reg_imm128(vecX dst)
5973 %{
5974   single_instruction;
5975   dst    : S3(write);
5976   INS0   : ISS;
5977   NEON_FP : S3;
5978 %}
5979 
5980 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
5981 %{
5982   single_instruction;
5983   dst    : S5(write);
5984   mem    : ISS(read);
5985   INS01  : ISS;
5986   NEON_FP : S3;
5987 %}
5988 
5989 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
5990 %{
5991   single_instruction;
5992   dst    : S5(write);
5993   mem    : ISS(read);
5994   INS01  : ISS;
5995   NEON_FP : S3;
5996 %}
5997 
5998 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
5999 %{
6000   single_instruction;
6001   mem    : ISS(read);
6002   src    : S2(read);
6003   INS01  : ISS;
6004   NEON_FP : S3;
6005 %}
6006 
// 128-bit vector store.
// Source operand class fixed to vecX: a 128-bit store reads a full quad
// register, matching vload_reg_mem128 above. (Was vecD -- copy-paste from
// the 64-bit vstore_reg_mem64 variant.)
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6015 
6016 //------- Integer ALU operations --------------------------
6017 
6018 // Integer ALU reg-reg operation
6019 // Operands needed in EX1, result generated in EX2
6020 // Eg.  ADD     x0, x1, x2
6021 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6022 %{
6023   single_instruction;
6024   dst    : EX2(write);
6025   src1   : EX1(read);
6026   src2   : EX1(read);
6027   INS01  : ISS; // Dual issue as instruction 0 or 1
6028   ALU    : EX2;
6029 %}
6030 
6031 // Integer ALU reg-reg operation with constant shift
6032 // Shifted register must be available in LATE_ISS instead of EX1
6033 // Eg.  ADD     x0, x1, x2, LSL #2
6034 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6035 %{
6036   single_instruction;
6037   dst    : EX2(write);
6038   src1   : EX1(read);
6039   src2   : ISS(read);
6040   INS01  : ISS;
6041   ALU    : EX2;
6042 %}
6043 
6044 // Integer ALU reg operation with constant shift
6045 // Eg.  LSL     x0, x1, #shift
6046 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6047 %{
6048   single_instruction;
6049   dst    : EX2(write);
6050   src1   : ISS(read);
6051   INS01  : ISS;
6052   ALU    : EX2;
6053 %}
6054 
6055 // Integer ALU reg-reg operation with variable shift
6056 // Both operands must be available in LATE_ISS instead of EX1
6057 // Result is available in EX1 instead of EX2
6058 // Eg.  LSLV    x0, x1, x2
6059 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6060 %{
6061   single_instruction;
6062   dst    : EX1(write);
6063   src1   : ISS(read);
6064   src2   : ISS(read);
6065   INS01  : ISS;
6066   ALU    : EX1;
6067 %}
6068 
6069 // Integer ALU reg-reg operation with extract
6070 // As for _vshift above, but result generated in EX2
6071 // Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  // NOTE(review): the header above says the result is generated in EX2 and
  // dst is indeed written in EX2, yet the ALU resource is only held through
  // EX1 -- confirm whether this should read "ALU : EX2" or is intentional.
  ALU    : EX1;
%}
6081 
6082 // Integer ALU reg operation
6083 // Eg.  NEG     x0, x1
6084 pipe_class ialu_reg(iRegI dst, iRegI src)
6085 %{
6086   single_instruction;
6087   dst    : EX2(write);
6088   src    : EX1(read);
6089   INS01  : ISS;
6090   ALU    : EX2;
6091 %}
6092 
// Integer ALU reg immediate operation
6094 // Eg.  ADD     x0, x1, #N
6095 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6096 %{
6097   single_instruction;
6098   dst    : EX2(write);
6099   src1   : EX1(read);
6100   INS01  : ISS;
6101   ALU    : EX2;
6102 %}
6103 
6104 // Integer ALU immediate operation (no source operands)
6105 // Eg.  MOV     x0, #N
6106 pipe_class ialu_imm(iRegI dst)
6107 %{
6108   single_instruction;
6109   dst    : EX1(write);
6110   INS01  : ISS;
6111   ALU    : EX1;
6112 %}
6113 
6114 //------- Compare operation -------------------------------
6115 
6116 // Compare reg-reg
6117 // Eg.  CMP     x0, x1
6118 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6119 %{
6120   single_instruction;
6121 //  fixed_latency(16);
6122   cr     : EX2(write);
6123   op1    : EX1(read);
6124   op2    : EX1(read);
6125   INS01  : ISS;
6126   ALU    : EX2;
6127 %}
6128 
// Compare reg-imm
6130 // Eg.  CMP     x0, #N
6131 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6132 %{
6133   single_instruction;
6134 //  fixed_latency(16);
6135   cr     : EX2(write);
6136   op1    : EX1(read);
6137   INS01  : ISS;
6138   ALU    : EX2;
6139 %}
6140 
6141 //------- Conditional instructions ------------------------
6142 
6143 // Conditional no operands
6144 // Eg.  CSINC   x0, zr, zr, <cond>
6145 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6146 %{
6147   single_instruction;
6148   cr     : EX1(read);
6149   dst    : EX2(write);
6150   INS01  : ISS;
6151   ALU    : EX2;
6152 %}
6153 
6154 // Conditional 2 operand
6155 // EG.  CSEL    X0, X1, X2, <cond>
6156 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6157 %{
6158   single_instruction;
6159   cr     : EX1(read);
6160   src1   : EX1(read);
6161   src2   : EX1(read);
6162   dst    : EX2(write);
6163   INS01  : ISS;
6164   ALU    : EX2;
6165 %}
6166 
// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
6169 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6170 %{
6171   single_instruction;
6172   cr     : EX1(read);
6173   src    : EX1(read);
6174   dst    : EX2(write);
6175   INS01  : ISS;
6176   ALU    : EX2;
6177 %}
6178 
6179 //------- Multiply pipeline operations --------------------
6180 
6181 // Multiply reg-reg
6182 // Eg.  MUL     w0, w1, w2
6183 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6184 %{
6185   single_instruction;
6186   dst    : WR(write);
6187   src1   : ISS(read);
6188   src2   : ISS(read);
6189   INS01  : ISS;
6190   MAC    : WR;
6191 %}
6192 
6193 // Multiply accumulate
6194 // Eg.  MADD    w0, w1, w2, w3
6195 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6196 %{
6197   single_instruction;
6198   dst    : WR(write);
6199   src1   : ISS(read);
6200   src2   : ISS(read);
6201   src3   : ISS(read);
6202   INS01  : ISS;
6203   MAC    : WR;
6204 %}
6205 
// Long multiply
// Eg.  MUL     x0, x1, x2
6207 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6208 %{
6209   single_instruction;
6210   fixed_latency(3); // Maximum latency for 64 bit mul
6211   dst    : WR(write);
6212   src1   : ISS(read);
6213   src2   : ISS(read);
6214   INS01  : ISS;
6215   MAC    : WR;
6216 %}
6217 
// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
6220 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6221 %{
6222   single_instruction;
6223   fixed_latency(3); // Maximum latency for 64 bit mul
6224   dst    : WR(write);
6225   src1   : ISS(read);
6226   src2   : ISS(read);
6227   src3   : ISS(read);
6228   INS01  : ISS;
6229   MAC    : WR;
6230 %}
6231 
6232 //------- Divide pipeline operations --------------------
6233 
6234 // Eg.  SDIV    w0, w1, w2
6235 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6236 %{
6237   single_instruction;
6238   fixed_latency(8); // Maximum latency for 32 bit divide
6239   dst    : WR(write);
6240   src1   : ISS(read);
6241   src2   : ISS(read);
6242   INS0   : ISS; // Can only dual issue as instruction 0
6243   DIV    : WR;
6244 %}
6245 
6246 // Eg.  SDIV    x0, x1, x2
6247 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6248 %{
6249   single_instruction;
6250   fixed_latency(16); // Maximum latency for 64 bit divide
6251   dst    : WR(write);
6252   src1   : ISS(read);
6253   src2   : ISS(read);
6254   INS0   : ISS; // Can only dual issue as instruction 0
6255   DIV    : WR;
6256 %}
6257 
6258 //------- Load pipeline operations ------------------------
6259 
6260 // Load - prefetch
6261 // Eg.  PFRM    <mem>
6262 pipe_class iload_prefetch(memory mem)
6263 %{
6264   single_instruction;
6265   mem    : ISS(read);
6266   INS01  : ISS;
6267   LDST   : WR;
6268 %}
6269 
6270 // Load - reg, mem
6271 // Eg.  LDR     x0, <mem>
6272 pipe_class iload_reg_mem(iRegI dst, memory mem)
6273 %{
6274   single_instruction;
6275   dst    : WR(write);
6276   mem    : ISS(read);
6277   INS01  : ISS;
6278   LDST   : WR;
6279 %}
6280 
6281 // Load - reg, reg
6282 // Eg.  LDR     x0, [sp, x1]
6283 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6284 %{
6285   single_instruction;
6286   dst    : WR(write);
6287   src    : ISS(read);
6288   INS01  : ISS;
6289   LDST   : WR;
6290 %}
6291 
6292 //------- Store pipeline operations -----------------------
6293 
6294 // Store - zr, mem
6295 // Eg.  STR     zr, <mem>
6296 pipe_class istore_mem(memory mem)
6297 %{
6298   single_instruction;
6299   mem    : ISS(read);
6300   INS01  : ISS;
6301   LDST   : WR;
6302 %}
6303 
6304 // Store - reg, mem
6305 // Eg.  STR     x0, <mem>
6306 pipe_class istore_reg_mem(iRegI src, memory mem)
6307 %{
6308   single_instruction;
6309   mem    : ISS(read);
6310   src    : EX2(read);
6311   INS01  : ISS;
6312   LDST   : WR;
6313 %}
6314 
6315 // Store - reg, reg
6316 // Eg. STR      x0, [sp, x1]
6317 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6318 %{
6319   single_instruction;
6320   dst    : ISS(read);
6321   src    : EX2(read);
6322   INS01  : ISS;
6323   LDST   : WR;
6324 %}
6325 
//------- Branch pipeline operations ----------------------
6327 
6328 // Branch
6329 pipe_class pipe_branch()
6330 %{
6331   single_instruction;
6332   INS01  : ISS;
6333   BRANCH : EX1;
6334 %}
6335 
6336 // Conditional branch
6337 pipe_class pipe_branch_cond(rFlagsReg cr)
6338 %{
6339   single_instruction;
6340   cr     : EX1(read);
6341   INS01  : ISS;
6342   BRANCH : EX1;
6343 %}
6344 
6345 // Compare & Branch
6346 // EG.  CBZ/CBNZ
6347 pipe_class pipe_cmp_branch(iRegI op1)
6348 %{
6349   single_instruction;
6350   op1    : EX1(read);
6351   INS01  : ISS;
6352   BRANCH : EX1;
6353 %}
6354 
6355 //------- Synchronisation operations ----------------------
6356 
6357 // Any operation requiring serialization.
6358 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;  // must not be reordered with neighbouring instructions
  fixed_latency(16);    // conservative worst-case latency for barriers/atomics
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6367 
6368 // Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);  // expands to multiple machine instructions
  multiple_bundles;
  force_serialization;    // must not be reordered with neighbouring instructions
  fixed_latency(16);      // conservative estimate; actual cost is idiom-dependent
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6378 
6379 // Empty pipeline class
6380 pipe_class pipe_class_empty()
6381 %{
6382   single_instruction;
6383   fixed_latency(0);
6384 %}
6385 
6386 // Default pipeline class.
6387 pipe_class pipe_class_default()
6388 %{
6389   single_instruction;
6390   fixed_latency(2);
6391 %}
6392 
6393 // Pipeline class for compares.
6394 pipe_class pipe_class_compare()
6395 %{
6396   single_instruction;
6397   fixed_latency(16);
6398 %}
6399 
6400 // Pipeline class for memory operations.
6401 pipe_class pipe_class_memory()
6402 %{
6403   single_instruction;
6404   fixed_latency(16);
6405 %}
6406 
6407 // Pipeline class for call.
6408 pipe_class pipe_class_call()
6409 %{
6410   single_instruction;
6411   fixed_latency(100);
6412 %}
6413 
6414 // Define the class for the Nop node.
6415 define %{
6416    MachNop = pipe_class_empty;
6417 %}
6418 
6419 %}
6420 //----------INSTRUCTIONS-------------------------------------------------------
6421 //
6422 // match      -- States which machine-independent subtree may be replaced
6423 //               by this instruction.
6424 // ins_cost   -- The estimated cost of this instruction is used by instruction
6425 //               selection to identify a minimum cost tree of machine
6426 //               instructions that matches a tree of machine-independent
6427 //               instructions.
6428 // format     -- A string providing the disassembly for this instruction.
6429 //               The value of an instruction's operand may be inserted
6430 //               by referring to it with a '$' prefix.
6431 // opcode     -- Three instruction opcodes may be provided.  These are referred
6432 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively. The primary opcode is commonly used to
6434 //               indicate the type of machine instruction, while secondary
6435 //               and tertiary are often used for prefix options or addressing
6436 //               modes.
6437 // ins_encode -- A list of encode classes with parameters. The encode class
6438 //               name must have been defined in an 'enc_class' specification
6439 //               in the encode section of the architecture description.
6440 
6441 // ============================================================================
6442 // Memory (Load/Store) Instructions
6443 
6444 // Load Instructions
6445 
6446 // Load Byte (8 bit signed)
6447 instruct loadB(iRegINoSp dst, memory mem)
6448 %{
6449   match(Set dst (LoadB mem));
6450   predicate(!needs_acquiring_load(n));
6451 
6452   ins_cost(4 * INSN_COST);
6453   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6454 
6455   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6456 
6457   ins_pipe(iload_reg_mem);
6458 %}
6459 
6460 // Load Byte (8 bit signed) into long
6461 instruct loadB2L(iRegLNoSp dst, memory mem)
6462 %{
6463   match(Set dst (ConvI2L (LoadB mem)));
6464   predicate(!needs_acquiring_load(n->in(1)));
6465 
6466   ins_cost(4 * INSN_COST);
6467   format %{ "ldrsb  $dst, $mem\t# byte" %}
6468 
6469   ins_encode(aarch64_enc_ldrsb(dst, mem));
6470 
6471   ins_pipe(iload_reg_mem);
6472 %}
6473 
6474 // Load Byte (8 bit unsigned)
6475 instruct loadUB(iRegINoSp dst, memory mem)
6476 %{
6477   match(Set dst (LoadUB mem));
6478   predicate(!needs_acquiring_load(n));
6479 
6480   ins_cost(4 * INSN_COST);
6481   format %{ "ldrbw  $dst, $mem\t# byte" %}
6482 
6483   ins_encode(aarch64_enc_ldrb(dst, mem));
6484 
6485   ins_pipe(iload_reg_mem);
6486 %}
6487 
6488 // Load Byte (8 bit unsigned) into long
6489 instruct loadUB2L(iRegLNoSp dst, memory mem)
6490 %{
6491   match(Set dst (ConvI2L (LoadUB mem)));
6492   predicate(!needs_acquiring_load(n->in(1)));
6493 
6494   ins_cost(4 * INSN_COST);
6495   format %{ "ldrb  $dst, $mem\t# byte" %}
6496 
6497   ins_encode(aarch64_enc_ldrb(dst, mem));
6498 
6499   ins_pipe(iload_reg_mem);
6500 %}
6501 
6502 // Load Short (16 bit signed)
6503 instruct loadS(iRegINoSp dst, memory mem)
6504 %{
6505   match(Set dst (LoadS mem));
6506   predicate(!needs_acquiring_load(n));
6507 
6508   ins_cost(4 * INSN_COST);
6509   format %{ "ldrshw  $dst, $mem\t# short" %}
6510 
6511   ins_encode(aarch64_enc_ldrshw(dst, mem));
6512 
6513   ins_pipe(iload_reg_mem);
6514 %}
6515 
6516 // Load Short (16 bit signed) into long
6517 instruct loadS2L(iRegLNoSp dst, memory mem)
6518 %{
6519   match(Set dst (ConvI2L (LoadS mem)));
6520   predicate(!needs_acquiring_load(n->in(1)));
6521 
6522   ins_cost(4 * INSN_COST);
6523   format %{ "ldrsh  $dst, $mem\t# short" %}
6524 
6525   ins_encode(aarch64_enc_ldrsh(dst, mem));
6526 
6527   ins_pipe(iload_reg_mem);
6528 %}
6529 
6530 // Load Char (16 bit unsigned)
6531 instruct loadUS(iRegINoSp dst, memory mem)
6532 %{
6533   match(Set dst (LoadUS mem));
6534   predicate(!needs_acquiring_load(n));
6535 
6536   ins_cost(4 * INSN_COST);
6537   format %{ "ldrh  $dst, $mem\t# short" %}
6538 
6539   ins_encode(aarch64_enc_ldrh(dst, mem));
6540 
6541   ins_pipe(iload_reg_mem);
6542 %}
6543 
6544 // Load Short/Char (16 bit unsigned) into long
6545 instruct loadUS2L(iRegLNoSp dst, memory mem)
6546 %{
6547   match(Set dst (ConvI2L (LoadUS mem)));
6548   predicate(!needs_acquiring_load(n->in(1)));
6549 
6550   ins_cost(4 * INSN_COST);
6551   format %{ "ldrh  $dst, $mem\t# short" %}
6552 
6553   ins_encode(aarch64_enc_ldrh(dst, mem));
6554 
6555   ins_pipe(iload_reg_mem);
6556 %}
6557 
6558 // Load Integer (32 bit signed)
6559 instruct loadI(iRegINoSp dst, memory mem)
6560 %{
6561   match(Set dst (LoadI mem));
6562   predicate(!needs_acquiring_load(n));
6563 
6564   ins_cost(4 * INSN_COST);
6565   format %{ "ldrw  $dst, $mem\t# int" %}
6566 
6567   ins_encode(aarch64_enc_ldrw(dst, mem));
6568 
6569   ins_pipe(iload_reg_mem);
6570 %}
6571 
6572 // Load Integer (32 bit signed) into long
6573 instruct loadI2L(iRegLNoSp dst, memory mem)
6574 %{
6575   match(Set dst (ConvI2L (LoadI mem)));
6576   predicate(!needs_acquiring_load(n->in(1)));
6577 
6578   ins_cost(4 * INSN_COST);
6579   format %{ "ldrsw  $dst, $mem\t# int" %}
6580 
6581   ins_encode(aarch64_enc_ldrsw(dst, mem));
6582 
6583   ins_pipe(iload_reg_mem);
6584 %}
6585 
6586 // Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  // Matches an int load zero-extended to long: (LoadI & 0xFFFFFFFF).
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // The LoadI sits two levels down in the matched tree (under AndL ->
  // ConvI2L), hence the n->in(1)->in(1) navigation to reach it for the
  // acquiring-load check.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6599 
6600 // Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly comment fixed: this is a 64-bit load (was "# int" by
  // copy-paste from loadI). Affects debug format output only.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6613 
6614 // Load Range
6615 instruct loadRange(iRegINoSp dst, memory mem)
6616 %{
6617   match(Set dst (LoadRange mem));
6618 
6619   ins_cost(4 * INSN_COST);
6620   format %{ "ldrw  $dst, $mem\t# range" %}
6621 
6622   ins_encode(aarch64_enc_ldrw(dst, mem));
6623 
6624   ins_pipe(iload_reg_mem);
6625 %}
6626 
6627 // Load Pointer
6628 instruct loadP(iRegPNoSp dst, memory mem)
6629 %{
6630   match(Set dst (LoadP mem));
6631   predicate(!needs_acquiring_load(n));
6632 
6633   ins_cost(4 * INSN_COST);
6634   format %{ "ldr  $dst, $mem\t# ptr" %}
6635 
6636   ins_encode(aarch64_enc_ldr(dst, mem));
6637 
6638   ins_pipe(iload_reg_mem);
6639 %}
6640 
6641 // Load Compressed Pointer
6642 instruct loadN(iRegNNoSp dst, memory mem)
6643 %{
6644   match(Set dst (LoadN mem));
6645   predicate(!needs_acquiring_load(n));
6646 
6647   ins_cost(4 * INSN_COST);
6648   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6649 
6650   ins_encode(aarch64_enc_ldrw(dst, mem));
6651 
6652   ins_pipe(iload_reg_mem);
6653 %}
6654 
6655 // Load Klass Pointer
6656 instruct loadKlass(iRegPNoSp dst, memory mem)
6657 %{
6658   match(Set dst (LoadKlass mem));
6659   predicate(!needs_acquiring_load(n));
6660 
6661   ins_cost(4 * INSN_COST);
6662   format %{ "ldr  $dst, $mem\t# class" %}
6663 
6664   ins_encode(aarch64_enc_ldr(dst, mem));
6665 
6666   ins_pipe(iload_reg_mem);
6667 %}
6668 
6669 // Load Narrow Klass Pointer
6670 instruct loadNKlass(iRegNNoSp dst, memory mem)
6671 %{
6672   match(Set dst (LoadNKlass mem));
6673   predicate(!needs_acquiring_load(n));
6674 
6675   ins_cost(4 * INSN_COST);
6676   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6677 
6678   ins_encode(aarch64_enc_ldrw(dst, mem));
6679 
6680   ins_pipe(iload_reg_mem);
6681 %}
6682 
6683 // Load Float
6684 instruct loadF(vRegF dst, memory mem)
6685 %{
6686   match(Set dst (LoadF mem));
6687   predicate(!needs_acquiring_load(n));
6688 
6689   ins_cost(4 * INSN_COST);
6690   format %{ "ldrs  $dst, $mem\t# float" %}
6691 
6692   ins_encode( aarch64_enc_ldrs(dst, mem) );
6693 
6694   ins_pipe(pipe_class_memory);
6695 %}
6696 
6697 // Load Double
6698 instruct loadD(vRegD dst, memory mem)
6699 %{
6700   match(Set dst (LoadD mem));
6701   predicate(!needs_acquiring_load(n));
6702 
6703   ins_cost(4 * INSN_COST);
6704   format %{ "ldrd  $dst, $mem\t# double" %}
6705 
6706   ins_encode( aarch64_enc_ldrd(dst, mem) );
6707 
6708   ins_pipe(pipe_class_memory);
6709 %}
6710 
6711 
6712 // Load Int Constant
6713 instruct loadConI(iRegINoSp dst, immI src)
6714 %{
6715   match(Set dst src);
6716 
6717   ins_cost(INSN_COST);
6718   format %{ "mov $dst, $src\t# int" %}
6719 
6720   ins_encode( aarch64_enc_movw_imm(dst, src) );
6721 
6722   ins_pipe(ialu_imm);
6723 %}
6724 
6725 // Load Long Constant
6726 instruct loadConL(iRegLNoSp dst, immL src)
6727 %{
6728   match(Set dst src);
6729 
6730   ins_cost(INSN_COST);
6731   format %{ "mov $dst, $src\t# long" %}
6732 
6733   ins_encode( aarch64_enc_mov_imm(dst, src) );
6734 
6735   ins_pipe(ialu_imm);
6736 %}
6737 
6738 // Load Pointer Constant
6739 
6740 instruct loadConP(iRegPNoSp dst, immP con)
6741 %{
6742   match(Set dst con);
6743 
6744   ins_cost(INSN_COST * 4);
6745   format %{
6746     "mov  $dst, $con\t# ptr\n\t"
6747   %}
6748 
6749   ins_encode(aarch64_enc_mov_p(dst, con));
6750 
6751   ins_pipe(ialu_imm);
6752 %}
6753 
6754 // Load Null Pointer Constant
6755 
6756 instruct loadConP0(iRegPNoSp dst, immP0 con)
6757 %{
6758   match(Set dst con);
6759 
6760   ins_cost(INSN_COST);
6761   format %{ "mov  $dst, $con\t# NULL ptr" %}
6762 
6763   ins_encode(aarch64_enc_mov_p0(dst, con));
6764 
6765   ins_pipe(ialu_imm);
6766 %}
6767 
6768 // Load Pointer Constant One
6769 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Disassembly comment fixed: this loads the constant-one pointer, not
  // NULL (was "# NULL ptr" by copy-paste from loadConP0 above). Affects
  // debug format output only.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6781 
6782 // Load Poll Page Constant
6783 
6784 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6785 %{
6786   match(Set dst con);
6787 
6788   ins_cost(INSN_COST);
6789   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6790 
6791   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6792 
6793   ins_pipe(ialu_imm);
6794 %}
6795 
6796 // Load Byte Map Base Constant
6797 
6798 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6799 %{
6800   match(Set dst con);
6801 
6802   ins_cost(INSN_COST);
6803   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6804 
6805   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6806 
6807   ins_pipe(ialu_imm);
6808 %}
6809 
6810 // Load Narrow Pointer Constant
6811 
6812 instruct loadConN(iRegNNoSp dst, immN con)
6813 %{
6814   match(Set dst con);
6815 
6816   ins_cost(INSN_COST * 4);
6817   format %{ "mov  $dst, $con\t# compressed ptr" %}
6818 
6819   ins_encode(aarch64_enc_mov_n(dst, con));
6820 
6821   ins_pipe(ialu_imm);
6822 %}
6823 
6824 // Load Narrow Null Pointer Constant
6825 
6826 instruct loadConN0(iRegNNoSp dst, immN0 con)
6827 %{
6828   match(Set dst con);
6829 
6830   ins_cost(INSN_COST);
6831   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6832 
6833   ins_encode(aarch64_enc_mov_n0(dst, con));
6834 
6835   ins_pipe(ialu_imm);
6836 %}
6837 
6838 // Load Narrow Klass Constant
6839 
6840 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6841 %{
6842   match(Set dst con);
6843 
6844   ins_cost(INSN_COST);
6845   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6846 
6847   ins_encode(aarch64_enc_mov_nk(dst, con));
6848 
6849   ins_pipe(ialu_imm);
6850 %}
6851 
6852 // Load Packed Float Constant
6853 
6854 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6855   match(Set dst con);
6856   ins_cost(INSN_COST * 4);
6857   format %{ "fmovs  $dst, $con"%}
6858   ins_encode %{
6859     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6860   %}
6861 
6862   ins_pipe(fp_imm_s);
6863 %}
6864 
6865 // Load Float Constant
6866 
6867 instruct loadConF(vRegF dst, immF con) %{
6868   match(Set dst con);
6869 
6870   ins_cost(INSN_COST * 4);
6871 
6872   format %{
6873     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6874   %}
6875 
6876   ins_encode %{
6877     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6878   %}
6879 
6880   ins_pipe(fp_load_constant_s);
6881 %}
6882 
6883 // Load Packed Double Constant
6884 
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  // Double constant expressible as an fmov 8-bit immediate: materialize
  // it directly instead of loading from the constant table.
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
6895 
6896 // Load Double Constant
6897 
instruct loadConD(vRegD dst, immD con) %{
  // General double constant: load it from the constant table
  // (used when the value does not fit an fmov immediate).
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // Fixed: the comment previously said "float=" (copy-paste from loadConF).
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6912 
6913 // Store Instructions
6914 
6915 // Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  // Card-mark store of zero where the preceding StoreStore barrier has
  // been proven unnecessary (see unnecessary_storestore): just strb zr.
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6929 
6930 // Store CMS card-mark Immediate with intervening StoreStore
6931 // needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  // Card-mark store of zero preceded by a StoreStore barrier (dmb ishst);
  // fallback when the barrier cannot be elided.
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
6945 
6946 // Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  // Plain (non-releasing) byte store.
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
6959 
6960 
instruct storeimmB0(immI0 zero, memory mem)
%{
  // Plain (non-releasing) byte store of the constant zero.
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: format previously said "strb rscractch2" (misspelled and wrong
  // register) — the strb0 encoding stores the zero register.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6973 
6974 // Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  // Plain (non-releasing) 16-bit char/short store.
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
6987 
instruct storeimmC0(immI0 zero, memory mem)
%{
  // Plain (non-releasing) 16-bit store of the constant zero (strh zr).
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
7000 
7001 // Store Integer
7002 
instruct storeI(iRegIorL2I src, memory mem)
%{
  // Plain (non-releasing) 32-bit int store.
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7015 
instruct storeimmI0(immI0 zero, memory mem)
%{
  // Plain (non-releasing) 32-bit store of the constant zero (strw zr).
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7028 
7029 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  // Plain (non-releasing) 64-bit long store.
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: comment previously said "# int" for a 64-bit store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7042 
7043 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  // Plain (non-releasing) 64-bit store of the constant zero (str zr).
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: comment previously said "# int" for a 64-bit store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7056 
7057 // Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  // Plain (non-releasing) 64-bit pointer store.
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7070 
7071 // Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  // Plain (non-releasing) store of the null pointer constant (str zr).
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7084 
7085 // Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  // Plain (non-releasing) 32-bit compressed-oop store.
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7098 
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  // Store narrow null by reusing rheapbase: when both narrow-oop and
  // narrow-klass bases are NULL, rheapbase holds zero, so storing it
  // writes a compressed null without materializing a constant.
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7113 
7114 // Store Float
instruct storeF(vRegF src, memory mem)
%{
  // Plain (non-releasing) 32-bit float store from an FP register.
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7127 
7128 // TODO
7129 // implement storeImmF0 and storeFImmPacked
7130 
7131 // Store Double
instruct storeD(vRegD src, memory mem)
%{
  // Plain (non-releasing) 64-bit double store from an FP register.
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7144 
7145 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // Plain (non-releasing) 32-bit compressed-klass-pointer store.
  // Consistency: match listed before predicate, like every sibling store rule.
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7158 
7159 // TODO
7160 // implement storeImmD0 and storeDImmPacked
7161 
7162 // prefetch instructions
7163 // Must be safe to execute with invalid address (cannot fault).
7164 
instruct prefetchalloc( memory mem ) %{
  // Allocation prefetch: prfm PSTL1KEEP hint; must be safe to execute
  // with an invalid address (prefetches cannot fault).
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7175 
7176 //  ---------------- volatile loads and stores ----------------
7177 
7178 // Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile byte load: load-acquire with sign extension (ldarsb).
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
7190 
7191 // Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile byte load widened to long: ldarsb sign-extends to 64 bits,
  // so the ConvI2L folds into the load.
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
7203 
7204 // Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile unsigned byte load: load-acquire, zero extension (ldarb).
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
7216 
7217 // Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile unsigned byte load widened to long: ldarb zero-extends,
  // so the ConvI2L folds into the load.
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
7229 
7230 // Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile short load: load-acquire with sign extension (ldarshw).
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
7242 
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile char/unsigned short load: load-acquire, zero extension.
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
7254 
7255 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile char load widened to long: ldarh zero-extends,
  // so the ConvI2L folds into the load.
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7267 
7268 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile short load widened to long: sign-extending load-acquire,
  // so the ConvI2L folds into the load.
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: format previously said "ldarh" but the encoding emits the
  // sign-extending ldarsh.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7280 
7281 // Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile 32-bit int load: load-acquire (ldarw).
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7293 
7294 // Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  // Volatile unsigned int load widened to long: ldarw zero-extends,
  // so the AndL with the 32-bit mask folds into the load.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7306 
7307 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile 64-bit long load: load-acquire (ldar).
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: comment previously said "# int" for a 64-bit load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7319 
7320 // Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile pointer load: 64-bit load-acquire (ldar).
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7332 
7333 // Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile compressed-oop load: 32-bit load-acquire (ldarw).
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7345 
7346 // Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  // Volatile float load with acquire semantics (fldars encoding).
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
7358 
7359 // Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  // Volatile double load with acquire semantics (fldard encoding).
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7371 
7372 // Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Volatile byte store: store-release (stlrb).
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
7384 
7385 // Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Volatile char/short store: store-release (stlrh).
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
7397 
7398 // Store Integer
7399 
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Volatile 32-bit int store: store-release (stlrw).
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7411 
7412 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  // Volatile 64-bit long store: store-release (stlr).
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: comment previously said "# int" for a 64-bit store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7424 
7425 // Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  // Volatile pointer store: 64-bit store-release (stlr).
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7437 
7438 // Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  // Volatile compressed-oop store: 32-bit store-release (stlrw).
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7450 
7451 // Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  // Volatile float store with release semantics (fstlrs encoding).
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7463 
7464 // TODO
7465 // implement storeImmF0 and storeFImmPacked
7466 
7467 // Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  // Volatile double store with release semantics (fstlrd encoding).
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7479 
7480 //  ---------------- end of volatile loads and stores ----------------
7481 
7482 // ============================================================================
7483 // BSWAP Instructions
7484 
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  // Byte-swap a 32-bit value (Integer.reverseBytes) with a single revw.
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7497 
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  // Byte-swap a 64-bit value (Long.reverseBytes) with a single rev.
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7510 
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  // Byte-swap a 16-bit char (Character.reverseBytes): rev16w swaps the
  // two low bytes; no extension needed for the unsigned case.
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7523 
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  // Byte-swap a 16-bit short (Short.reverseBytes): rev16w swaps the
  // bytes, then sbfmw sign-extends bits 0..15 into the full word.
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7538 
7539 // ============================================================================
7540 // Zero Count Instructions
7541 
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  // Integer.numberOfLeadingZeros via a single clzw.
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7553 
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  // Long.numberOfLeadingZeros via a single clz.
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7565 
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  // Integer.numberOfTrailingZeros: AArch64 has no ctz, so reverse the
  // bits (rbitw) and count leading zeros (clzw).
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7579 
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  // Long.numberOfTrailingZeros: AArch64 has no ctz, so reverse the
  // bits (rbit) and count leading zeros (clz).
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7593 
7594 //---------- Population Count Instructions -------------------------------------
7595 //
7596 
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  // Integer.bitCount via the SIMD cnt instruction: move the (zeroed-top)
  // value into a vector register, count bits per byte, then sum the bytes.
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back to $src without a declared effect on
    // src — confirm the allocator may treat this input as clobberable here.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7618 
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  // Integer.bitCount of an in-memory int: load straight into a vector
  // register (ldrs), count bits per byte, then sum the bytes.
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7640 
7641 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  // Long.bitCount via the SIMD cnt instruction: move the 64-bit value
  // into a vector register, count bits per byte, then sum the bytes.
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7661 
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  // Long.bitCount of an in-memory long: load straight into a vector
  // register (ldrd), count bits per byte, then sum the bytes.
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7683 
7684 // ============================================================================
7685 // MemBar Instruction
7686 
instruct load_fence() %{
  // LoadFence: barrier ordering prior loads before subsequent
  // loads and stores (acquire-style).
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
7698 
instruct unnecessary_membar_acquire() %{
  // MemBarAcquire proven redundant (the preceding load was emitted as
  // ldar/ldaxr): emit only a block comment, no instruction.
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
7712 
instruct membar_acquire() %{
  // MemBarAcquire: full acquire barrier ordering prior loads before
  // subsequent loads and stores.
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
7727 
7728 
instruct membar_acquire_lock() %{
  // MemBarAcquireLock: no instruction needed — the lock-acquire CAS
  // already provides the ordering; emit only a block comment.
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
7741 
instruct store_fence() %{
  // StoreFence: barrier ordering prior loads and stores before
  // subsequent stores (release-style).
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
7753 
instruct unnecessary_membar_release() %{
  // MemBarRelease proven redundant (the following store will be emitted
  // as stlr/stlxr): emit only a block comment, no instruction.
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
7766 
instruct membar_release() %{
  // MemBarRelease: full release barrier ordering prior loads and
  // stores before subsequent stores.
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
7780 
instruct membar_storestore() %{
  // MemBarStoreStore: order prior stores before subsequent stores.
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
7792 
instruct membar_release_lock() %{
  // MemBarReleaseLock: no instruction needed — the lock-release store
  // already provides the ordering; emit only a block comment.
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
7805 
instruct unnecessary_membar_volatile() %{
  // MemBarVolatile proven redundant (adjacent stlr/ldar already order
  // the accesses): emit only a block comment, no instruction.
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
7819 
instruct membar_volatile() %{
  // MemBarVolatile: full StoreLoad barrier (the expensive one); high
  // cost discourages the matcher from picking it when avoidable.
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7834 
7835 // ============================================================================
7836 // Cast/Convert Instructions
7837 
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  // Reinterpret a long as a pointer: plain register move, skipped
  // entirely when source and destination coincide.
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
7852 
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  // Reinterpret a pointer as a long: plain register move, skipped
  // entirely when source and destination coincide.
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
7867 
7868 // Convert oop into int for vectors alignment masking
// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  // Truncate a pointer to its low 32 bits (movw zero-extends the word).
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7880 
7881 // Convert compressed oop into int for vectors alignment masking
7882 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // With a zero narrow-oop shift the compressed form already equals the
  // low 32 bits of the decoded pointer, so a plain word move suffices.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: format previously read "mov dst, $src" — missing the '$' on
  // dst and naming the wrong mnemonic (the encoding emits movw).
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7896 
7897 
7898 // Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // Compress a possibly-null oop; the null check inside
  // encode_heap_oop clobbers the flags, hence KILL cr.
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7912 
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // Compress an oop statically known non-null: no null check needed.
  // NOTE(review): cr is declared but has no effect() — presumably the
  // not-null path leaves flags untouched; confirm against MacroAssembler.
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
7923 
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  // Decompress a possibly-null narrow oop (must preserve null, so the
  // general decode path is used).
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7937 
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  // Decompress a narrow oop statically known non-null (or constant):
  // skips the null-preservation check of the general path.
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7951 
7952 // n.b. AArch64 implementations of encode_klass_not_null and
7953 // decode_klass_not_null do not modify the flags register so, unlike
7954 // Intel, we don't kill CR as a side effect here
7955 
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  // Compress a non-null klass pointer; per the note above, the AArch64
  // implementation does not modify flags, so no KILL cr is needed.
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
7970 
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  // Decompress a non-null narrow klass pointer; selects the in-place
  // single-register variant when dst and src are the same register.
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7989 
instruct checkCastPP(iRegPNoSp dst)
%{
  // CheckCastPP is a compile-time type assertion only: zero-size,
  // empty encoding, no instruction emitted.
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
7999 
// CastPP, like CheckCastPP above, is a compile-time type assertion
// only: zero size, empty encoding.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8009 
// CastII narrows an int's type information without generating code:
// zero size, zero cost, empty encoding.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8020 
8021 // ============================================================================
8022 // Atomic operation instructions
8023 //
8024 // Intel and SPARC both implement Ideal Node LoadPLocked and
8025 // Store{PIL}Conditional instructions using a normal load for the
8026 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8027 //
8028 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8029 // pair to lock object allocations from Eden space when not using
8030 // TLABs.
8031 //
8032 // There does not appear to be a Load{IL}Locked Ideal Node and the
8033 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8034 // and to use StoreIConditional only for 32-bit and StoreLConditional
8035 // only for 64-bit.
8036 //
8037 // We implement LoadPLocked and StorePLocked instructions using,
8038 // respectively the AArch64 hw load-exclusive and store-conditional
8039 // instructions. Whereas we must implement each of
8040 // Store{IL}Conditional using a CAS which employs a pair of
8041 // instructions comprising a load-exclusive followed by a
8042 // store-conditional.
8043 
8044 
8045 // Locked-load (linked load) of the current heap-top
8046 // used when updating the eden heap top
8047 // implemented using ldaxr on AArch64
8048 
// Exclusive (linked) load of a pointer, emitted as ldaxr. Pairs with
// storePConditional below: the subsequent stlxr only succeeds while
// this CPU still holds the exclusive reservation.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8061 
8062 // Conditional-store of the updated heap-top.
8063 // Used during allocation of the shared heap.
8064 // Sets flag (EQ) on success.
8065 // implemented using stlxr on AArch64.
8066 
// Store-conditional of the updated heap top. n.b. oldval is required
// by the ideal StorePConditional shape but is not passed to the
// encoding: stlxr succeeds only if the reservation taken by the
// preceding loadPLocked is still held, which subsumes the compare.
// Sets flags so EQ indicates a successful store.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8086 
8087 
8088 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8089 // when attempting to rebias a lock towards the current thread.  We
8090 // must use the acquire form of cmpxchg in order to guarantee acquire
8091 // semantics in this case.
// Long store-conditional implemented as an acquiring CAS (see the
// rebias rationale in the comment above). Sets flags; EQ on success.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8107 
8108 // storeIConditional also has acquire semantics, for no better reason
8109 // than matching storeLConditional.  At the time of writing this
8110 // comment storeIConditional was not used anywhere by AArch64.
// Int store-conditional, acquiring CAS form matching
// storeLConditional above. Sets flags; EQ on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8126 
8127 // standard CompareAndSwapX when we are using barriers
8128 // these have higher priority than the rules selected by a predicate
8129 
8130 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8131 // can't match them
8132 
// Byte CAS: if *$mem == $oldval then store $newval; $res <- 1 on
// success, 0 on failure (via cset on the EQ flag). Flags clobbered.
// The format now says "(byte)" -- it previously said "(int)", which
// was misleading for a cmpxchgb.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8150 
// Short (halfword) CAS: if *$mem == $oldval then store $newval;
// $res <- 1 on success, 0 on failure. Flags clobbered.
// The format now says "(short)" -- it previously said "(int)", which
// was misleading for a halfword CAS.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8168 
// Int CAS with full barriers: if *$mem == $oldval then store $newval;
// $res <- 1 on success, 0 on failure. Flags clobbered.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8186 
// Long CAS with full barriers; $res <- 1 on success, 0 on failure.
// Flags clobbered.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8204 
// Pointer CAS with full barriers; $res <- 1 on success, 0 on failure.
// Flags clobbered.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8222 
// Narrow-oop CAS with full barriers (word-sized cmpxchgw);
// $res <- 1 on success, 0 on failure. Flags clobbered.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8240 
8241 // alternative CompareAndSwapX when we are eliding barriers
8242 
// Int CAS, acquiring variant: selected (by the lower ins_cost and the
// predicate) when barriers are being elided and the load-exclusive
// must itself acquire.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8261 
// Long CAS, acquiring variant (see compareAndSwapIAcq).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8280 
// Pointer CAS, acquiring variant (see compareAndSwapIAcq).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8299 
// Narrow-oop CAS, acquiring variant (see compareAndSwapIAcq).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8318 
8319 
8320 // ---------------------------------------------------------------------
8321 
8322 
8323 // BEGIN This section of the file is automatically generated. Do not edit --------------
8324 
8325 // Sundry CAS operations.  Note that release is always true,
8326 // regardless of the memory ordering of the CAS.  This is because we
8327 // need the volatile case to be sequentially consistent but there is
8328 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8329 // can't check the type of memory ordering here, so we always emit a
8330 // STLXR.
8331 
8332 // This section is generated from aarch64_ad_cas.m4
8333 
8334 
8335 
// Strong byte CAS returning the previous value in $res, sign-extended
// to int. $oldval is zero-extended into rscratch2 for the comparison.
// NOTE(review): the format said "(byte, weak)" but this is a strong
// CAS (/*weak*/ false); the text is fixed here and the same fix must
// be mirrored in aarch64_ad_cas.m4.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8352 
// Strong short CAS returning the previous value in $res, sign-extended
// to int. $oldval is zero-extended into rscratch2 for the comparison.
// NOTE(review): format previously said "weak" but /*weak*/ is false;
// mirror this text fix in aarch64_ad_cas.m4.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8369 
// Strong int CAS returning the previous value in $res.
// NOTE(review): format previously said "weak" but /*weak*/ is false;
// mirror this text fix in aarch64_ad_cas.m4.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8384 
// Strong long CAS returning the previous value in $res.
// NOTE(review): format previously said "weak" but /*weak*/ is false;
// mirror this text fix in aarch64_ad_cas.m4.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8399 
// Strong narrow-oop CAS returning the previous value in $res.
// NOTE(review): format previously said "weak" but /*weak*/ is false;
// mirror this text fix in aarch64_ad_cas.m4.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8414 
// Strong pointer CAS returning the previous value in $res.
// NOTE(review): format previously said "weak" but /*weak*/ is false;
// mirror this text fix in aarch64_ad_cas.m4.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8429 
// Weak byte CAS (/*weak*/ true, result register noreg): only the
// success boolean is produced, via csetw on the flags.
// $oldval is zero-extended into rscratch2 for the byte comparison.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8447 
// Weak short CAS: success boolean only; $oldval zero-extended into
// rscratch2 for the halfword comparison.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8465 
// Weak int CAS: success boolean only (noreg discards the old value).
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8482 
// Weak long CAS: success boolean only. n.b. $res is an int register
// because the node's result is the boolean, not the long value.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8499 
// Weak narrow-oop CAS: success boolean only.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8516 
// Weak pointer CAS: success boolean only.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8533 
8534 // END This section of the file is automatically generated. Do not edit --------------
8535 // ---------------------------------------------------------------------
8536 
// Atomic int exchange: store $newv at [mem], previous value in $prev.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8545 
// Atomic long exchange: store $newv at [mem], previous value in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8554 
// Atomic narrow-oop exchange (word-sized xchg).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8563 
// Atomic pointer exchange: store $newv at [mem], previous value in $prev.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8572 
8573 
// Atomic long add of a register increment; the node's result is
// produced in $newval by MacroAssembler::atomic_add.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8583 
// As get_and_addL but when the fetched value is unused: noreg discards
// it, and the slightly lower ins_cost makes this rule win.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8594 
// Atomic long add with an add/sub-encodable immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8604 
// Immediate-increment long add, result discarded (see
// get_and_addL_no_res).
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8615 
// Atomic int add of a register increment (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8625 
// Register-increment int add, result discarded (see
// get_and_addL_no_res).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8636 
// Atomic int add with an add/sub-encodable immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8646 
// Immediate-increment int add, result discarded (see
// get_and_addL_no_res).
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8657 
8658 // Manifest a CmpL result in an integer register.
8659 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    // dst = (src1 != src2) ? 1 : 0 ...
    __ csetw($dst$$Register, Assembler::NE);
    // ... then negated to -1 when src1 < src2, giving -1/0/1.
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8680 
// Manifest a CmpL result (-1/0/1) in an integer register, comparing a
// long register against an add/sub-encodable immediate. (The stray
// one-space indentation on the `if` line has been normalized.)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // subs cannot encode a negative immediate, so compare against a
    // negative constant by adding its magnitude instead.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // dst = (src1 != con) ? 1 : 0, then negated to -1 when src1 < con.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8705 
8706 // ============================================================================
8707 // Conditional Move Instructions
8708 
8709 // n.b. we have identical rules for both a signed compare op (cmpOp)
8710 // and an unsigned compare op (cmpOpU). it would be nice if we could
8711 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
8716 // which throws a ShouldNotHappen. So, we have to provide two flavours
8717 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8718 
// Conditional move, signed int: csel selects its first source when the
// condition holds, so $src2 is passed first -- dst = cmp ? src2 : src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8734 
// Conditional move, unsigned-compare int flavour of cmovI_reg_reg
// (see the note above on why cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8750 
8751 // special cases where one arg is zero
8752 
8753 // n.b. this is selected in preference to the rule above because it
8754 // avoids loading constant 0 into a source register
8755 
8756 // TODO
8757 // we ought only to be able to cull one of these variants as the ideal
8758 // transforms ought always to order the zero consistently (to left/right?)
8759 
// Conditional move where the false operand is zero: use zr directly
// instead of materializing 0 -- dst = cmp ? src : 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8775 
// Unsigned-compare flavour of cmovI_zero_reg: dst = cmp ? src : 0.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8791 
// Conditional move where the true operand is zero: dst = cmp ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8807 
// Unsigned-compare flavour of cmovI_reg_zero: dst = cmp ? 0 : src.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8823 
8824 // special case for creating a boolean 0 or 1
8825 
8826 // n.b. this is selected in preference to the rule above because it
8827 // avoids loading constants 0 and 1 into a source register
8828 
// Boolean materialization without any source register:
// csincw dst, zr, zr, cond yields cond ? 0 : 0+1, i.e. dst = cmp ? 0 : 1.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8847 
// Unsigned-compare flavour of cmovI_reg_zero_one: dst = cmp ? 0 : 1.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8866 
// long cmove (signed compare): one csel selects between the two source registers.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8882 
// long cmove (unsigned compare): one csel selects between the two source registers.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8898 
8899 // special cases where one arg is zero
8900 
// long cmove (signed compare) with zero operand: csel picks zr or $src on $cmp.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8916 
// long cmove (unsigned compare) with zero operand: csel picks zr or $src on $cmp.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8932 
// long cmove (signed compare), zero as the first CMove operand: csel picks $src or zr.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8948 
// long cmove (unsigned compare), zero as the first CMove operand: csel picks $src or zr.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8964 
// pointer cmove (signed compare): one csel selects between the two source registers.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8980 
// pointer cmove (unsigned compare): one csel selects between the two source registers.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8996 
8997 // special cases where one arg is zero
8998 
// pointer cmove (signed compare) with null operand: csel picks zr or $src on $cmp.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9014 
// pointer cmove (unsigned compare) with null operand: csel picks zr or $src on $cmp.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9030 
// pointer cmove (signed compare), null as the first CMove operand: csel picks $src or zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9046 
// pointer cmove (unsigned compare), null as the first CMove operand: csel picks $src or zr.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9062 
// compressed-pointer cmove (signed compare): 32-bit cselw selects between the sources.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9078 
// compressed-pointer cmove (unsigned compare): 32-bit cselw selects between the sources.
// (format annotation fixed from "signed" to "unsigned" — this is the cmpOpU variant,
// matching the other cmovU* rules; only disassembly output is affected.)
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9094 
9095 // special cases where one arg is zero
9096 
// compressed-pointer cmove (signed compare) with null operand: cselw picks zr or $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9112 
// compressed-pointer cmove (unsigned compare) with null operand: cselw picks zr or $src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9128 
// compressed-pointer cmove (signed compare), null first: cselw picks $src or zr.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9144 
// compressed-pointer cmove (unsigned compare), null first: cselw picks $src or zr.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9160 
// float cmove (signed compare): one fcsels selects between the two FP sources.
// (format operand order fixed to $src2, $src1 to match the emitted instruction —
// the encoder passes src2 as the first selected register, exactly as the integer
// cmov formats print it; only disassembly output is affected.)
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src2, $src1, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9178 
// float cmove (unsigned compare): one fcsels selects between the two FP sources.
// (format operand order fixed to $src2, $src1 to match the emitted instruction;
// only disassembly output is affected.)
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src2, $src1, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9196 
// double cmove (signed compare): one fcseld selects between the two FP sources.
// (format fixed: said "cmove float" for the double rule, and operand order now
// prints $src2, $src1 to match the emitted instruction; disassembly output only.)
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src2, $src1, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9214 
// double cmove (unsigned compare): one fcseld selects between the two FP sources.
// (format fixed: said "cmove float" for the double rule, and operand order now
// prints $src2, $src1 to match the emitted instruction; disassembly output only.)
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src2, $src1, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9232 
9233 // ============================================================================
9234 // Arithmetic Instructions
9235 //
9236 
9237 // Integer Addition
9238 
9239 // TODO
9240 // these currently employ operations which do not set CR and hence are
9241 // not flagged as killing CR but we would like to isolate the cases
9242 // where we want to set flags from those where we don't. need to work
9243 // out how to do that.
9244 
// int add, register-register: emits a single 32-bit addw.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9259 
// int add, register-immediate: shares the add/sub immediate encoder, with the
// opcode field distinguishing add (0x0) from sub (0x1).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9273 
// int add of a narrowed long (ConvL2I) and an immediate: the 32-bit addw
// implicitly performs the L2I truncation, so no separate narrowing insn is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9287 
9288 // Pointer Addition
// Pointer Addition
// pointer + long offset: one 64-bit add.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9303 
// pointer + sign-extended int offset: folds the ConvI2L into the add's sxtw
// extended-register form, saving a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9318 
// pointer + shifted long offset: folds the LShiftL into the address mode via lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9333 
// pointer + (sign-extended int << scale): folds both the ConvI2L and the shift
// into a single sxtw-scaled address mode via lea.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9348 
// (ConvI2L src) << scale collapsed into one sbfiz: sign-extends the low 32 bits
// of $src while inserting them at bit position ($scale & 63); the MIN caps the
// field width at the 32 significant source bits.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9363 
9364 // Pointer Immediate Addition
9365 // n.b. this needs to be more expensive than using an indirect memory
9366 // operand
// pointer + immediate: shares the add/sub immediate encoder (opcode 0x0 = add).
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9380 
9381 // Long Addition
// long add, register-register: one 64-bit add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9397 
// Long Immediate Addition. No constant pool entries required.
// long add, register-immediate: shares the add/sub immediate encoder (opcode 0x0 = add).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9412 
9413 // Integer Subtraction
// int subtract, register-register: one 32-bit subw.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9428 
9429 // Immediate Subtraction
// int subtract, register-immediate: shares the add/sub encoder (opcode 0x1 = sub).
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9443 
9444 // Long Subtraction
// long subtract, register-register: one 64-bit sub.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9460 
// Long Immediate Subtraction. No constant pool entries required.
// long subtract, register-immediate: shares the add/sub encoder (opcode 0x1 = sub).
// (format fixed: "sub$dst" was missing the space after the mnemonic; only
// disassembly output is affected.)
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9475 
9476 // Integer Negation (special case for sub)
9477 
// int negate: matches (0 - src) and emits negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9491 
9492 // Long Negation
9493 
// long negate: matches (0 - src) and emits neg.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9507 
9508 // Integer Multiply
9509 
// int multiply: one 32-bit mulw.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9524 
// long multiply of two sign-extended ints: smull folds both ConvI2L nodes into
// one widening multiply.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9539 
9540 // Long Multiply
9541 
// long multiply: one 64-bit mul.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9556 
// high 64 bits of a signed 64x64 multiply: smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9572 
9573 // Combined Integer Multiply & Add/Sub
9574 
// fused int multiply-add: src3 + src1*src2 in one maddw.
// (format fixed from "madd" to "maddw" to match the emitted 32-bit instruction,
// consistent with mulI's "mulw"; only disassembly output is affected.)
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9590 
// fused int multiply-subtract: src3 - src1*src2 in one msubw.
// (format fixed from "msub" to "msubw" to match the emitted 32-bit instruction;
// only disassembly output is affected.)
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9606 
9607 // Combined Integer Multiply & Neg
9608 
// int multiply-negate: matches (0-src1)*src2 and src1*(0-src2), emitting mnegw.
// (format fixed from "mneg" to "mnegw" to match the emitted 32-bit instruction;
// only disassembly output is affected.)
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9624 
9625 // Combined Long Multiply & Add/Sub
9626 
// fused long multiply-add: src3 + src1*src2 in one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9642 
// fused long multiply-subtract: src3 - src1*src2 in one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9658 
9659 // Combined Long Multiply & Neg
9660 
// long multiply-negate: matches (0-src1)*src2 and src1*(0-src2), emitting mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9676 
9677 // Integer Divide
9678 
// int divide: delegates to the aarch64_enc_divw encoder (sdivw plus any
// required min-int handling lives in the encoder).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9688 
// (src >> 31) >>> 31 — extract the sign bit as 0/1 with a single lsrw by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
9698 
// src + sign-bit-of-src (rounding adjustment used by divide-by-power-of-two):
// fused into one addw with an LSR #31 shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
9712 
9713 // Long Divide
9714 
// long divide: delegates to the aarch64_enc_div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9724 
// (src >> 63) >>> 63 — extract the long sign bit as 0/1 with a single lsr by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
9734 
// src + sign-bit-of-src (long rounding adjustment for divide-by-power-of-two):
// fused into one add with an LSR #63 shifted operand.
// (format fixed to show the LSR shift, matching div2Round's format and the
// emitted instruction; only disassembly output is affected.)
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
9748 
9749 // Integer Remainder
9750 
// int remainder: sdivw into rscratch1 then msubw to recover the remainder;
// both instructions come from the aarch64_enc_modw encoder.
// (format fixed: stray unbalanced "(" after "msubw" removed; disassembly only.)
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9761 
9762 // Long Remainder
9763 
// long remainder: sdiv into rscratch1 then msub to recover the remainder;
// both instructions come from the aarch64_enc_mod encoder.
// (format fixed: stray unbalanced "(" after "msub" removed and line break made
// "\n\t" to match modI; disassembly only.)
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9774 
9775 // Integer Shifts
9776 
9777 // Shift Left Register
// int shift left by register amount: lslvw (variable-shift form).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9792 
9793 // Shift Left Immediate
// int shift left by constant: the & 0x1f masks the count to the 32-bit shift range,
// matching Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9808 
9809 // Shift Right Logical Register
// int unsigned shift right by register amount: lsrvw.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9824 
9825 // Shift Right Logical Immediate
// int unsigned shift right by constant; count masked to 0..31 per Java semantics.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9840 
9841 // Shift Right Arithmetic Register
// int arithmetic shift right by register amount: asrvw.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9856 
9857 // Shift Right Arithmetic Immediate
// int arithmetic shift right by constant; count masked to 0..31 per Java semantics.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9872 
9873 // Combined Int Mask and Right Shift (using UBFM)
9874 // TODO
9875 
9876 // Long Shifts
9877 
9878 // Shift Left Register
// long shift left by register amount: lslv.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9893 
9894 // Shift Left Immediate
// long shift left by constant; count masked to 0..63 per Java semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9909 
9910 // Shift Right Logical Register
// Long logical (unsigned) right shift by a register amount: dst = src1 >>> src2 (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9925 
9926 // Shift Right Logical Immediate
// Long logical (unsigned) right shift by an immediate: dst = src1 >>> (src2 & 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9941 
9942 // A special-case pattern for card table stores.
// Unsigned right shift of a pointer reinterpreted as an integer (CastP2X):
// dst = (long)src1 >>> (src2 & 0x3f). Matches the address-scaling pattern
// produced for card table stores (see comment above).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9957 
9958 // Shift Right Arithmetic Register
// Long arithmetic right shift by a register amount: dst = src1 >> src2 (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9973 
9974 // Shift Right Arithmetic Immediate
// Long arithmetic right shift by an immediate: dst = src1 >> (src2 & 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9989 
9990 // BEGIN This section of the file is automatically generated. Do not edit --------------
9991 
// Long bitwise NOT: matches (src1 ^ -1) and emits EON dst, src1, zr, i.e. dst = ~src1.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Int bitwise NOT: matches (src1 ^ -1) and emits EONW dst, src1, zr, i.e. dst = ~src1.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10024 
// dst = src1 & ~src2 (int): folds the XOR-with-minus-one (NOT) into BICW.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10041 
// dst = src1 & ~src2 (long): folds the XOR-with-minus-one (NOT) into BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10058 
// dst = src1 | ~src2 (int): folds the NOT into ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10075 
// dst = src1 | ~src2 (long): folds the NOT into ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10092 
// dst = ~(src1 ^ src2) (int): matches -1 ^ (src2 ^ src1) and emits EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10109 
// dst = ~(src1 ^ src2) (long): matches -1 ^ (src2 ^ src1) and emits EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10126 
// dst = src1 & ~(src2 >>> src3) (int): BICW with an LSR-shifted operand; count masked to 5 bits.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10144 
// dst = src1 & ~(src2 >>> src3) (long): BIC with an LSR-shifted operand; count masked to 6 bits.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10162 
// dst = src1 & ~(src2 >> src3) (int): BICW with an ASR-shifted operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10180 
// dst = src1 & ~(src2 >> src3) (long): BIC with an ASR-shifted operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10198 
// dst = src1 & ~(src2 << src3) (int): BICW with an LSL-shifted operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10216 
// dst = src1 & ~(src2 << src3) (long): BIC with an LSL-shifted operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10234 
// dst = ~(src1 ^ (src2 >>> src3)) (int): EONW with an LSR-shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10252 
// dst = ~(src1 ^ (src2 >>> src3)) (long): EON with an LSR-shifted operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10270 
// dst = ~(src1 ^ (src2 >> src3)) (int): EONW with an ASR-shifted operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10288 
// dst = ~(src1 ^ (src2 >> src3)) (long): EON with an ASR-shifted operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10306 
// dst = ~(src1 ^ (src2 << src3)) (int): EONW with an LSL-shifted operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10324 
// dst = ~(src1 ^ (src2 << src3)) (long): EON with an LSL-shifted operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10342 
// dst = src1 | ~(src2 >>> src3) (int): ORNW with an LSR-shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10360 
// dst = src1 | ~(src2 >>> src3) (long): ORN with an LSR-shifted operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10378 
// dst = src1 | ~(src2 >> src3) (int): ORNW with an ASR-shifted operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10396 
// dst = src1 | ~(src2 >> src3) (long): ORN with an ASR-shifted operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10414 
// dst = src1 | ~(src2 << src3) (int): ORNW with an LSL-shifted operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10432 
// dst = src1 | ~(src2 << src3) (long): ORN with an LSL-shifted operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10450 
// dst = src1 & (src2 >>> src3) (int): ANDW with an LSR-shifted operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10469 
// dst = src1 & (src2 >>> src3) (long): AND ("andr" in the MacroAssembler) with an LSR-shifted operand.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10488 
// dst = src1 & (src2 >> src3) (int): ANDW with an ASR-shifted operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10507 
// dst = src1 & (src2 >> src3) (long): AND with an ASR-shifted operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10526 
// dst = src1 & (src2 << src3) (int): ANDW with an LSL-shifted operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10545 
// dst = src1 & (src2 << src3) (long): AND with an LSL-shifted operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10564 
// dst = src1 ^ (src2 >>> src3) (int): EORW with an LSR-shifted operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10583 
// dst = src1 ^ (src2 >>> src3) (long): EOR with an LSR-shifted operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10602 
// dst = src1 ^ (src2 >> src3) (int): EORW with an ASR-shifted operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10621 
// dst = src1 ^ (src2 >> src3) (long): EOR with an ASR-shifted operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10640 
// dst = src1 ^ (src2 << src3) (int): EORW with an LSL-shifted operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10659 
// dst = src1 ^ (src2 << src3) (long): EOR with an LSL-shifted operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10678 
// dst = src1 | (src2 >>> src3) (int): ORRW with an LSR-shifted operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10697 
// dst = src1 | (src2 >>> src3) (long): ORR with an LSR-shifted operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10716 
// dst = src1 | (src2 >> src3) (int): ORRW with an ASR-shifted operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10735 
// dst = src1 | (src2 >> src3) (long): ORR with an ASR-shifted operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10754 
// dst = src1 | (src2 << src3) (int): ORRW with an LSL-shifted operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10773 
// dst = src1 | (src2 << src3) (long): ORR with an LSL-shifted operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10792 
// dst = src1 + (src2 >>> src3) (int): ADDW with an LSR-shifted operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10811 
// dst = src1 + (src2 >>> src3) (long): ADD with an LSR-shifted operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10830 
// dst = src1 + (src2 >> src3) (int): ADDW with an ASR-shifted operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10849 
10850 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
10851                          iRegL src1, iRegL src2,
10852                          immI src3, rFlagsReg cr) %{
10853   match(Set dst (AddL src1 (RShiftL src2 src3)));
10854 
10855   ins_cost(1.9 * INSN_COST);
10856   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
10857 
10858   ins_encode %{
10859     __ add(as_Register($dst$$reg),
10860               as_Register($src1$$reg),
10861               as_Register($src2$$reg),
10862               Assembler::ASR,
10863               $src3$$constant & 0x3f);
10864   %}
10865 
10866   ins_pipe(ialu_reg_reg_shift);
10867 %}
10868 
10869 instruct AddI_reg_LShift_reg(iRegINoSp dst,
10870                          iRegIorL2I src1, iRegIorL2I src2,
10871                          immI src3, rFlagsReg cr) %{
10872   match(Set dst (AddI src1 (LShiftI src2 src3)));
10873 
10874   ins_cost(1.9 * INSN_COST);
10875   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
10876 
10877   ins_encode %{
10878     __ addw(as_Register($dst$$reg),
10879               as_Register($src1$$reg),
10880               as_Register($src2$$reg),
10881               Assembler::LSL,
10882               $src3$$constant & 0x1f);
10883   %}
10884 
10885   ins_pipe(ialu_reg_reg_shift);
10886 %}
10887 
10888 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
10889                          iRegL src1, iRegL src2,
10890                          immI src3, rFlagsReg cr) %{
10891   match(Set dst (AddL src1 (LShiftL src2 src3)));
10892 
10893   ins_cost(1.9 * INSN_COST);
10894   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
10895 
10896   ins_encode %{
10897     __ add(as_Register($dst$$reg),
10898               as_Register($src1$$reg),
10899               as_Register($src2$$reg),
10900               Assembler::LSL,
10901               $src3$$constant & 0x3f);
10902   %}
10903 
10904   ins_pipe(ialu_reg_reg_shift);
10905 %}
10906 
// Subtract with a shifted second operand.  Mirror image of the Add
// rules above: each rule fuses a Sub whose second input is a
// constant-shift node into a single sub/subw with a shifted-register
// operand.  Shift counts are masked to 0x1f (32-bit) / 0x3f (64-bit).

// dst = src1 - (src2 >>> src3)   (32-bit, logical shift right)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3)   (64-bit, logical shift right)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)   (32-bit, arithmetic shift right)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)   (64-bit, arithmetic shift right)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)   (32-bit, logical shift left)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)   (64-bit, logical shift left)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11020 
11021 
11022 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// The pair is collapsed into a single signed bitfield move (SBFM):
// for (x << lshift) >> rshift, SBFM's immr is (rshift - lshift) mod 64
// and imms is 63 - lshift, per the SBFM immediate encoding.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  // The unsigned cast also rejects negative shift constants.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of the rule above, using SBFMW (immr/imms mod 32).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart: (x << lshift) >>> rshift becomes a single UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant, using UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask  ==>  a single UBFXW extracting `width` bits
// starting at bit `rshift`.  The immI_bitmask operand guarantees mask
// is of the form 2^k - 1, so exact_log2(mask+1) yields the field width.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant: (src >>> rshift) & mask  ==>  UBFX.
// NOTE(review): mask is a 64-bit value from immL_bitmask; this relies on
// exact_log2 accepting the full 64-bit mask+1 — confirm (newer sources
// use exact_log2_long here).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// UBFX zero-extends into the 64-bit destination, which is exactly the
// ConvI2L semantics for a non-negative masked value.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11164 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift  ==>  UBFIZW inserting `width` low bits at
// position `lshift`.  The predicate ensures lshift + width fits in 32
// bits, which is what UBFIZW can encode.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant: predicate ensures lshift + width <= 64 (UBFIZ limit).
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// The 32-bit AND result is non-negative (immI_bitmask), so ConvI2L is a
// plain zero-extension and UBFIZ on the 64-bit register is equivalent.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11222 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64 is a
// 64-bit EXTR (extract register pair): the predicate checks the shift
// counts sum to 0 mod 64.  When src1 == src2 this is a rotate right.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of the rule above: lshift + rshift == 32 ==> EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same fusion with Add instead of Or: the shifted halves cannot overlap
// (lshift + rshift == 64), so the Add is equivalent to the Or.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit Add variant: lshift + rshift == 32 ==> EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11284 
11285 
// rol expander
// AArch64 has no rotate-left instruction; rotate left by `shift` is
// implemented as rotate right by (-shift), computed into rscratch1
// (which this expansion clobbers).  RORV masks the count to the
// register width, so the negation wraps correctly.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of the rule above, using RORVW.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the rotate-left idiom (x << s) | (x >>> (64 - s)) and
// expands to the rolL_rReg expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written as (x << s) | (x >>> (0 - s)); shift counts are
// taken mod 64, so 0 - s and 64 - s are equivalent.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with the 32 - s form.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom with the 0 - s form (mod-32 equivalent).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11353 
// ror expander
// Rotate right maps directly onto RORV — a single instruction, hence
// the lower cost than the rol expansion.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant, using RORVW.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Matches the rotate-right idiom (x >>> s) | (x << (64 - s)) and
// expands to the rorL_rReg expander above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with 0 - s; equivalent since shifts are mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with the 32 - s form.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom with the 0 - s form (mod-32 equivalent).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11419 
11420 // Add/subtract (extended)
11421 
// Fuse AddL of a sign-extended int into a single add with an
// extended-register (sxtw) operand: dst = src1 + sign_extend(src2).
// Fixed: removed the stray ';' after the closing '%}' and normalized
// the encode-section indentation — every other instruct in this file
// closes with a bare '%}' and uses two-space indentation.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

  ins_encode %{
    __ add(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}
  ins_pipe(ialu_reg_reg);
%}
11434 
// Fuse SubL of a sign-extended int into a single sub with an
// extended-register (sxtw) operand: dst = src1 - sign_extend(src2).
// Fixed: removed the stray ';' after the closing '%}' and normalized
// the encode-section indentation to match the rest of the file.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}
  ins_pipe(ialu_reg_reg);
%}
11447 
11448 
// Add with an extend expressed as a shift pair.  A sign extension
// written as (x << k) >> k (or zero extension as (x << k) >>> k) is
// fused into a single add with an extended-register operand.  The
// matched immI_16/immI_24/... operands pin the shift count to the
// value that corresponds to the extension width (16 -> sxth on ints,
// 24 -> sxtb/uxtb on ints, 48/32/56 -> the long equivalents).

// dst = src1 + sxth(src2)   (32-bit; (x << 16) >> 16)
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtb(src2)   (32-bit; (x << 24) >> 24)
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtb(src2)   (32-bit; (x << 24) >>> 24)
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxth(src2)   (64-bit; (x << 48) >> 48)
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtw(src2)   (64-bit; (x << 32) >> 32)
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + sxtb(src2)   (64-bit; (x << 56) >> 56)
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + uxtb(src2)   (64-bit; (x << 56) >>> 56)
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11539 
11540 
// Add/Sub with a zero extension expressed as an AND mask.  Masking
// with 0xff / 0xffff / 0xffffffff is a zero extension of the low
// byte / halfword / word, so it is fused into a single add/sub with
// a uxtb / uxth / uxtw extended-register operand.

// dst = src1 + (src2 & 0xff)   ==>  addw ..., uxtb
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff)   ==>  addw ..., uxth
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffL)   ==>  add ..., uxtb  (64-bit)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffffL)   ==>  add ..., uxth  (64-bit)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffffffffL)   ==>  add ..., uxtw  (64-bit)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xff)   ==>  subw ..., uxtb
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff)   ==>  subw ..., uxth
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffL)   ==>  sub ..., uxtb  (64-bit)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffffL)   ==>  sub ..., uxth  (64-bit)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffffffffL)   ==>  sub ..., uxtw  (64-bit)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11670 
11671 
// Add/Sub with a sign extension (shift pair) followed by a further
// left shift.  AArch64's extended-register form takes an optional
// 0-4 bit left shift (immIExt bounds lshift2), so
//   src1 +/- (sxtX(src2) << lshift2)
// is a single add/sub with "ext, #lshift2".
// NOTE(review): the format strings print "#lshift2" literally rather
// than the operand value ("$lshift2") — debug-print only; confirm
// whether that is intentional.

// dst = src1 + (sxtb(src2) << lshift2)   (64-bit)
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxth(src2) << lshift2)   (64-bit)
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxtw(src2) << lshift2)   (64-bit)
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxtb(src2) << lshift2)   (64-bit)
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxth(src2) << lshift2)   (64-bit)
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxtw(src2) << lshift2)   (64-bit)
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
11749 
// 32-bit variants: fold shift-based sign extension plus shift into
// addw/subw with an extended register operand.  Shift pairs are 24 (byte)
// and 16 (half) for 32-bit operands.  (Auto-generated section.)

// dst = src1 + (sxtb(src2) << lshift2), 32-bit.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (sxth(src2) << lshift2), 32-bit.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxtb(src2) << lshift2), 32-bit.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (sxth(src2) << lshift2), 32-bit.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
11801 
11802 
// dst = src1 +/- ((long)src2 << lshift): AddL/SubL of a shifted ConvI2L
// collapses into add/sub with an sxtw-extended register operand.
// NOTE(review): named *ExtI_shift although the result is a long; also note
// the trailing "%};" terminators below — harmless, but inconsistent with
// the surrounding "%}" style.  Auto-generated section; do not hand-edit.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// dst = src1 - ((long)src2 << lshift).
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
11828 
11829 
// Zero-extension variants: an AND with a low-bits mask (0xff / 0xffff /
// 0xffffffff) followed by a left shift folds into add/sub with a
// zero-extended (uxtb/uxth/uxtw) shifted register operand.
// (Auto-generated section — do not hand-edit.)

// dst = src1 + ((src2 & 0xff) << lshift).
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((src2 & 0xffff) << lshift).
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((src2 & 0xffffffff) << lshift).
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xff) << lshift).
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xffff) << lshift).
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xffffffff) << lshift).
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
11907 
// 32-bit zero-extension variants of the rules above: AND with 0xff/0xffff
// plus shift folds into addw/subw with a uxtb/uxth extended operand.
// (Auto-generated section — do not hand-edit.)

// dst = src1 + ((src2 & 0xff) << lshift), 32-bit.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + ((src2 & 0xffff) << lshift), 32-bit.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xff) << lshift), 32-bit.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - ((src2 & 0xffff) << lshift), 32-bit.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
11959 // END This section of the file is automatically generated. Do not edit --------------
11960 
11961 // ============================================================================
11962 // Floating Point Arithmetic Instructions
11963 
// Basic scalar FP arithmetic (IEEE single/double) on SIMD&FP registers.

// dst = src1 + src2, single precision.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 + src2, double precision.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 - src2, single precision.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 - src2, double precision.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// dst = src1 * src2, single precision (slightly costlier than add/sub).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// dst = src1 * src2, double precision.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12053 
// Fused multiply-add family.  All rules are gated on UseFMA since fused
// rounding differs from separate mul+add; the Fma* ideal nodes only appear
// when FMA intrinsics are enabled.  Rules with two match alternatives
// accept the negation on either multiplicand.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3  (FMSUB computes src3 - src1*src2)
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3  (FNMADD computes -src3 - src1*src2)
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3  (FNMSUB computes -src3 + src1*src2)
// NOTE(review): the "zero" operand is not referenced by the match rule —
// presumably a leftover from an earlier (SubF zero ...) pattern; confirm
// before removing.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): "zero" operand unused here as well — see mnsubF_reg_reg.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12194 
12195 
// Math.max/min intrinsics map directly onto the fmax/fmin scalar
// instructions (single and double precision).

// Math.max(FF)F
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12251 
12252 
// FP division; costs reflect the much longer latency of fdiv relative to
// add/mul (double division being the slowest).

// dst = src1 / src2, single precision.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// dst = src1 / src2, double precision.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12282 
// dst = -src, single precision.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed to "fnegs": the encoding emits the single-precision fnegs
  // instruction, and negD_reg_reg's format likewise names "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12296 
// dst = -src, double precision.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12310 
// dst = |src|, single precision.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// dst = |src|, double precision.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12336 
// dst = sqrt(src), double precision.
// NOTE(review): ins_pipe classes look swapped between the two sqrt rules
// (fp_div_s here on the double form, fp_div_d below on the float form) —
// confirm against the pipeline class definitions before changing.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Single-precision sqrt: matches the ConvD2F(SqrtD(ConvF2D src)) shape the
// ideal graph uses for a float sqrt, collapsing it to a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12362 
12363 // ============================================================================
12364 // Logical Instructions
12365 
12366 // Integer Logical Instructions
12367 
12368 // And Instructions
12369 
12370 
// dst = src1 & src2, 32-bit register-register AND.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12385 
// dst = src1 & src2, 32-bit AND with a logical immediate (immILog is a
// constant encodable as an AArch64 bitmask immediate).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed to "andw": the encoding emits the non-flag-setting andw,
  // not the flag-setting andsw the previous string suggested.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12400 
// Or Instructions

// dst = src1 | src2, 32-bit register-register OR.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | src2, 32-bit OR with a logical (bitmask) immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2, 32-bit register-register XOR.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ src2, 32-bit XOR with a logical (bitmask) immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12464 
12465 // Long Logical Instructions
12466 // TODO
12467 
// 64-bit logical operations.
// NOTE(review): the format strings below annotate "# int" although the
// operands are long — presumably copy-paste from the 32-bit rules; cosmetic
// only (formats affect debug listings, not code generation).

// dst = src1 & src2, 64-bit register-register AND.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & src2, 64-bit AND with a logical (bitmask) immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// dst = src1 | src2, 64-bit register-register OR.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | src2, 64-bit OR with a logical (bitmask) immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2, 64-bit register-register XOR.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ src2, 64-bit XOR with a logical (bitmask) immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12561 
// Integer width conversions and boolean tests.

// i2l: sign-extend the low 32 bits via sbfm (sxtw alias).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned i2l: (long)src & 0xffffffff collapses to a zero-extension (ubfm).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// l2i: a 32-bit register move truncates to the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// int -> boolean: dst = (src != 0) ? 1 : 0 via compare-with-zero + cset;
// clobbers the flags (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != null) ? 1 : 0; 64-bit compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12636 
// Floating-point precision and FP<->integer conversions.
// fcvtz* truncate toward zero (Java semantics for (int)/(long) casts);
// scvtf* convert from a signed integer.

// d2f: narrow double to single.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// f2d: widen single to double.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// f2i: float to 32-bit int, round toward zero.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// f2l: float to 64-bit long, round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// i2f: signed 32-bit int to float.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// l2f: signed 64-bit long to float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// d2i: double to 32-bit int, round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// d2l: double to 64-bit long, round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// i2d: signed 32-bit int to double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// l2d: signed 64-bit long to double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
12766 
12767 // stack <-> reg and reg <-> reg shuffles with no conversion
12768 
// MoveF2I via memory: reload a spilled float stack slot into a 32-bit
// general register; bit-for-bit, no value conversion.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveI2F via memory: reload a spilled int stack slot into a float
// register; bit-for-bit, no value conversion.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveD2L via memory: reload a spilled double stack slot into a 64-bit
// general register; bit-for-bit, no value conversion.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// MoveL2D via memory: reload a spilled long stack slot into a double
// register; bit-for-bit, no value conversion.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveF2I via memory: store a float register to its int stack slot,
// reinterpreting the 32 bits without conversion.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// MoveI2F via memory: store a 32-bit general register to its float
// stack slot, reinterpreting the 32 bits without conversion.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12876 
// MoveD2L via memory: store a double register to its long stack slot,
// reinterpreting the 64 bits without conversion.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: operands were printed as "$dst, $src", which misrendered the
  // debug disassembly. The instruction stores $src to stack slot $dst,
  // matching the encoding below and the sibling store forms
  // (MoveF2I_reg_stack, MoveL2D_reg_stack) which all print "$src, $dst".
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12894 
// MoveL2D via memory: store a 64-bit general register to its double
// stack slot, reinterpreting the 64 bits without conversion.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// MoveF2I register-to-register: fmov copies the raw 32 bits from a
// float register into a general register (no memory round trip).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// MoveI2F register-to-register: fmov copies the raw 32 bits from a
// general register into a float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// MoveD2L register-to-register: fmov copies the raw 64 bits from a
// double register into a general register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// MoveL2D register-to-register: fmov copies the raw 64 bits from a
// general register into a double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
12984 
12985 // ============================================================================
12986 // clearing of an array
12987 
// ClearArray with a runtime word count: delegates to the zero_words
// stub/loop. Both inputs are fixed registers (r10 base, r11 count) and
// are clobbered (USE_KILL), so they must not be live afterwards.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// ClearArray with a small compile-time constant word count. Only matches
// when the constant is below BlockZeroingLowLimit (scaled to words), so
// the constant-count zero_words expansion is used; base is clobbered.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13019 
13020 // ============================================================================
13021 // Overflow Math Instructions
13022 
// OverflowAddI/L: the add itself is performed by a separate AddI/AddL
// node; here we only recompute the flags with cmn (add-and-set-flags,
// result discarded) so a following branch can test the V flag.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Same as above with an add/sub-encodable immediate second operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// OverflowSubI/L: recompute flags with cmp (subtract-and-set-flags).
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // cmp with an immediate, spelled as its subs-with-zr-destination alias.
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Negation overflow (0 - op): matched as OverflowSub with a zero first
// operand; encoded as a compare of zr against the operand.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13152 
// OverflowMulI: compute the full 64-bit product with smull, then check
// that it equals its own 32-bit sign extension (NE => overflow). The
// movw/cselw/cmpw tail translates that NE condition into the V flag
// (0x80000000 - 1 sets VS) so generic cmpOp::overflow tests work.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused OverflowMulI + branch: when the If tests only overflow /
// no_overflow (see predicate), skip the V-flag materialization above and
// branch directly on the NE/EQ result of the sign-extension compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // VS (overflow expected) maps to NE of the compare; VC maps to EQ.
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// OverflowMulL: low 64 bits via mul, high 64 bits via smulh; no overflow
// iff the high half is the pure sign extension of the low half (ASR #63).
// Same movw/cselw/cmpw tail as overflowMulI_reg to develop the V flag.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused OverflowMulL + branch; same VS->NE / VC->EQ mapping as the
// int variant above.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13242 
13243 // ============================================================================
13244 // Compare Instructions
13245 
// CmpI: 32-bit signed compare, register/register form.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpI against zero: still a cmpw with immediate 0.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpI against an immediate that fits the add/sub immediate encoding.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// CmpI against a general immediate; costs more since the constant may
// need to be materialized first (see aarch64_enc_cmpw_imm).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13301 
13302 // Unsigned compare Instructions; really, same as signed compare
13303 // except it should only be used to feed an If or a CMovI which takes a
13304 // cmpOpU.
13305 
// CmpU: same cmpw encodings as CmpI; only the consumer differs -- the
// flags land in rFlagsRegU so an unsigned cmpOpU interprets them.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// General immediate form; extra cost for constant materialization.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13361 
// CmpL: 64-bit signed compare, register/register form.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpL against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// CmpL against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// General immediate form; extra cost for constant materialization.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13417 
// CmpUL: unsigned 64-bit compare; identical encodings to CmpL but the
// flags go to rFlagsRegU for unsigned condition consumers.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// General immediate form; extra cost for constant materialization.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13473 
// CmpP: full-width pointer compare (unsigned flags consumer).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// CmpN: compressed (narrow) oop compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null check: CmpP against the constant null.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null check: CmpN against the narrow null constant.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13529 
13530 // FP comparisons
13531 //
13532 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13533 // using normal cmpOp. See declaration of rFlagsReg for details.
13534 
// CmpF: single-precision compare setting the normal flags register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CmpF against the constant 0.0 using the immediate-zero form of fcmp.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13562 // FROM HERE
13563 
// CmpD: double-precision compare setting the normal flags register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CmpD against the constant 0.0 using the immediate-zero form of fcmp.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13591 
// CmpF3/CmpD3: three-way compare producing -1 / 0 / 1 in an int register.
// fcmp sets the flags, csinv installs 0 (EQ) or -1 (otherwise), and csneg
// keeps -1 for less/unordered or flips it to +1 for greater.
// NOTE(review): the local 'done' label is declared and bound but never
// branched to in any of the four variants; the bind is a no-op, kept
// byte-identical here.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Double-precision variant of the three-way compare above.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against constant 0.0 (immediate-zero fcmp).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against constant 0.0 (immediate-zero fcmp).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
13699 
// CmpLTMask: dst = (p < q) ? -1 : 0. Set dst to 1 on LT with csetw,
// then negate (0 - 1 = -1) so the result is an all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: a single arithmetic right shift by 31
// smears the sign bit, yielding -1 for negative src and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13736 
13737 // ============================================================================
13738 // Max and Min
13739 
// MinI: signed 32-bit minimum via cmpw + conditional select on LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 < src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13764 // FROM HERE
13765 
// MaxI: signed 32-bit maximum via cmpw + conditional select on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 > src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13790 
13791 // ============================================================================
13792 // Branch Instructions
13793 
13794 // Direct Branch.
13795 instruct branch(label lbl)
13796 %{
13797   match(Goto);
13798 
13799   effect(USE lbl);
13800 
13801   ins_cost(BRANCH_COST);
13802   format %{ "b  $lbl" %}
13803 
13804   ins_encode(aarch64_enc_b(lbl));
13805 
13806   ins_pipe(pipe_branch);
13807 %}
13808 
13809 // Conditional Near Branch
// Conditional branch on signed condition codes in the flags register.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13829 
13830 // Conditional Near Branch Unsigned
// Conditional branch, unsigned variant: uses unsigned condition codes
// (cmpOpU / rFlagsRegU) but is otherwise identical to branchCon above.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13850 
13851 // Make use of CBZ and CBNZ.  These instructions, as well as being
13852 // shorter than (cmp; branch), have the additional benefit of not
13853 // killing the flags.
13854 
// Fuse (CmpI reg, 0) + eq/ne branch into a single cbzw/cbnzw.
// Does not write the flags, despite the rFlagsReg operand in the signature.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ -> branch if zero; otherwise (NE) branch if non-zero.
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13871 
// Fuse (CmpL reg, 0) + eq/ne branch into a single 64-bit cbz/cbnz.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13888 
// Fuse a pointer null-check (CmpP reg, 0) + eq/ne branch into cbz/cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13905 
// Fuse a narrow-oop null-check (CmpN reg, 0) + eq/ne branch into the
// 32-bit cbzw/cbnzw (compressed oops are 32 bits wide).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13922 
// Null-check of a DecodeN'd narrow oop against the null pointer: a
// compressed oop is null iff its encoded 32-bit form is zero, so test
// the narrow register directly with cbzw/cbnzw and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13939 
// Unsigned int compare against zero fused with a branch. Against zero,
// "unsigned <=" (LS) holds iff the value is zero, so EQ and LS both map
// to cbzw; the remaining conditions (NE, HI-side) map to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13956 
// Unsigned long compare against zero fused with a branch; 64-bit
// counterpart of cmpUI_imm0_branch above (EQ/LS -> cbz, else cbnz).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13973 
13974 // Test bit and Branch
13975 
13976 // Patterns for short (< 32KiB) variants
// Branch on the sign of a long compared with zero, using a test of the
// sign bit (bit 63): LT (negative) -> tbnz (NE), GE -> tbz (EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
13992 
// Branch on the sign of an int compared with zero by testing the
// 32-bit sign bit (bit 31); see cmpL_branch_sign for the LT/GE mapping.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14008 
// Branch on a single bit of a long: matches ((op1 & op2) ==/!= 0) where
// the mask op2 is a power of two, and emits one tbz/tbnz on that bit.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The mask is a power of two, so its log2 is the bit number to test.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14025 
// Branch on a single bit of an int; 32-bit counterpart of
// cmpL_branch_bit (mask must be a power of two).
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14042 
14043 // And far variants
// Far variant of cmpL_branch_sign: same sign-bit test, but tbr is told
// the target may be out of tbz/tbnz range (no ins_short_branch).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14058 
// Far variant of cmpI_branch_sign (sign bit 31, out-of-range target).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14073 
// Far variant of cmpL_branch_bit (single-bit test, out-of-range target).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14089 
// Far variant of cmpI_branch_bit (single-bit test, out-of-range target).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14105 
14106 // Test bits
14107 
// Set flags from (op1 & immediate) for a long, using a single tst.
// The predicate requires the mask to be encodable as a 64-bit logical
// immediate; otherwise the register-register form below is used.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14120 
// Set flags from (op1 & immediate) for an int, using a single tstw.
// The mask must be encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14133 
// Set flags from (op1 & op2) for a long, register-register form.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14144 
// Set flags from (op1 & op2) for an int, register-register form.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14155 
14156 
14157 // Conditional Far Branch
14158 // Conditional Far Branch Unsigned
14159 // TODO: fixme
14160 
14161 // counted loop end branch near
// Back-branch at the end of a counted loop, signed condition codes.
// Same encoding as branchCon; only the matched ideal node differs.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14177 
14178 // counted loop end branch near Unsigned
// Back-branch at the end of a counted loop, unsigned condition codes.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14194 
14195 // counted loop end branch far
14196 // counted loop end branch far unsigned
14197 // TODO: fixme
14198 
14199 // ============================================================================
14200 // inlined locking and unlocking
14201 
// Inline fast-path monitor enter (FastLock). Produces its result in the
// flags register; tmp and tmp2 are scratch registers clobbered by the
// aarch64_enc_fast_lock encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14216 
// Inline fast-path monitor exit (FastUnlock); counterpart of
// cmpFastLock above, with the same temp-register usage.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14229 
14230 
14231 // ============================================================================
14232 // Safepoint Instructions
14233 
14234 // TODO
14235 // provide a near and far version of this code
14236 
// Safepoint poll: load from the polling page; the VM arms the page so
// the load faults when a safepoint is pending (relocInfo::poll_type).
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14249 
14250 
14251 // ============================================================================
14252 // Procedure Call/Return Instructions
14253 
14254 // Call Java Static Instruction
14255 
// Direct call to a statically-bound Java method, followed by the
// standard call epilog encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14271 
14272 // TO HERE
14273 
14274 // Call Java Dynamic Instruction
// Call to a dynamically-dispatched Java method (via inline cache),
// followed by the standard call epilog encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14290 
14291 // Call Runtime Instruction
14292 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14307 
14308 // Call Runtime Instruction
14309 
// Call to a runtime leaf routine (no safepoint, no stack walk);
// uses the same java-to-runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14324 
14325 // Call Runtime Instruction
14326 
// Call to a runtime leaf routine that does not use floating point;
// encoding is identical to CallLeafDirect, only the matched node differs.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14341 
14342 // Tail Call; Jump from runtime stub to Java code.
14343 // Also known as an 'interprocedural jump'.
14344 // Target of jump will eventually return to caller.
14345 // TailJump below removes the return address.
// Indirect tail call (register branch) with the method oop carried in
// the inline-cache register; see the interprocedural-jump comment above.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14358 
// Indirect tail jump used for exception forwarding: the exception oop
// is pinned to r0 and the return address is discarded by the encoding.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14371 
14372 // Create exception oop: created by stack-crawling runtime code.
14373 // Created exception is now available to this handler, and is setup
14374 // just prior to jumping to this handler. No code emitted.
14375 // TODO check
14376 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materialize the incoming exception oop in r0. The runtime has already
// placed it there before jumping to the handler, so this emits no code
// (size 0) and merely informs the register allocator of the definition.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14389 
14390 // Rethrow exception: The exception oop will come in the first
14391 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the rethrow stub; the exception oop is expected in
// the first argument register per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14402 
14403 
14404 // Return Instruction
14405 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14416 
14417 // Die now.
// Halt: emit a trapping instruction for paths that must never execute.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
14432 
14433 // ============================================================================
14434 // Partial Subtype Check
14435 //
// Search the subtype's secondary superklass array for an instance of the
// superklass.  Set a hidden
14437 // internal cache on a hit (cache is checked with exposed code in
14438 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14439 // encoding ALSO sets flags.
14440 
// Partial subtype check producing a register result; registers are
// pinned (r4/r0/r2/r5) to match the stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14455 
// Partial subtype check whose result is only compared against zero:
// only the flags are consumed, so the result register need not be
// zeroed on a hit (opcode 0x0, vs 0x1 in the variant above).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14470 
// String.compareTo intrinsic, both strings UTF-16 (UU encoding).
// No vector temps are needed for same-encoding comparison (fnoreg).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14488 
// String.compareTo intrinsic, both strings Latin-1 (LL encoding).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14505 
// String.compareTo intrinsic, mixed encodings: str1 UTF-16, str2
// Latin-1 (UL). Mixed comparison needs three vector temps (v0-v2).
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14525 
// String.compareTo intrinsic, mixed encodings: str1 Latin-1, str2
// UTF-16 (LU). Mirror of string_compareUL above.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14545 
// String.indexOf intrinsic, both strings UTF-16 (UU), variable needle
// length. The -1 argument tells string_indexof the needle length is in
// a register (cnt2) rather than a compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14566 
// String.indexOf intrinsic, both strings Latin-1 (LL), variable needle
// length (see string_indexofUU for the -1 convention).
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14587 
// String.indexOf intrinsic, UTF-16 haystack with Latin-1 needle (UL),
// variable needle length (see string_indexofUU for the -1 convention).
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14608 
// String.indexOf with a small compile-time-constant needle length
// (immI_le_4), UU encoding. The constant count is passed directly to
// string_indexof, so cnt2 is not needed (zr placeholders).
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14629 
// String.indexOf with a small constant needle length (<= 4), LL
// encoding; see string_indexof_conUU for the zr/constant convention.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14650 
// String.indexOf with a constant needle length of exactly 1 (immI_1),
// UL encoding; see string_indexof_conUU for the zr/constant convention.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14671 
// StringUTF16.indexOf(char) intrinsic: find a single char in a UTF-16
// string; delegates to the string_indexof_char macro-assembler routine.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14689 
// String.equals intrinsic, LL encoding: the trailing '1' passes a 1-byte
// element size to the stub.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
14705 
// String.equals intrinsic, UU encoding: the trailing '2' passes a 2-byte
// element size to the stub.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
14721 
// Arrays.equals intrinsic for byte[] (LL): element size 1 passed to the stub.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}
14738 
// Arrays.equals intrinsic for char[] (UU): element size 2 passed to the stub.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
14755 
// StringCoding.hasNegatives intrinsic: scan a byte[] for any byte < 0.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
14766 
14767 // fast char[] to byte[] compression
// Compress a char[] into a byte[]; the four V-register TEMPs are the SIMD
// scratch used by the char_array_compress stub.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
14785 
14786 // fast byte[] to char[] inflation
// Inflate a byte[] into a char[]; produces no value (Universe dummy result).
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14800 
14801 // encode char[] to byte[] in ISO_8859_1
// Encode a char[] into an ISO-8859-1 byte[]; the four V registers are
// SIMD scratch clobbered (KILL) by the stub.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14819 
14820 // ============================================================================
14821 // This name is KNOWN by the ADLC and cannot be changed.
14822 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14823 // for this guy.
// Zero-cost, zero-size rule: emits no code because the value already lives
// in the dedicated thread register (thread_RegP).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14838 
14839 // ====================VECTOR INSTRUCTIONS=====================================
14840 
14841 // Load vector (32 bits)
// 32-bit vector load: ldr of an S register via the ldrvS encoding class.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
14851 
14852 // Load vector (64 bits)
// 64-bit vector load: ldr of a D register via the ldrvD encoding class.
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
14862 
14863 // Load Vector (128 bits)
// 128-bit vector load: ldr of a Q register via the ldrvQ encoding class.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
14873 
14874 // Store Vector (32 bits)
// 32-bit vector store: str of an S register via the strvS encoding class.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
14884 
14885 // Store Vector (64 bits)
// 64-bit vector store: str of a D register via the strvD encoding class.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
14895 
14896 // Store Vector (128 bits)
// 128-bit vector store: str of a Q register via the strvQ encoding class.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
14906 
// Broadcast a GP-register byte into all lanes of a 64-bit vector (also
// covers length-4 byte vectors, which fit in the low half).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14919 
// Broadcast a GP-register byte into all 16 lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14931 
// Broadcast an immediate byte (masked to 8 bits) into a 64-bit vector.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14944 
// Broadcast an immediate byte (masked to 8 bits) into a 128-bit vector.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14956 
// Broadcast a GP-register short into a 64-bit vector (lengths 2 and 4).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14969 
// Broadcast a GP-register short into all 8 lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14981 
// Broadcast an immediate short (masked to 16 bits) into a 64-bit vector.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14994 
// Broadcast an immediate short (masked to 16 bits) into a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15006 
// Broadcast a GP-register int into both lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15018 
// Broadcast a GP-register int into all 4 lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15030 
// Broadcast an immediate int into both lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15042 
// Broadcast an immediate int into all 4 lanes of a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15054 
// Broadcast a GP-register long into both lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15066 
// Materialise an all-zero 2L (2 x long) vector.  Fixed to match ReplicateL
// of the long constant zero (immL0): the previous (ReplicateI zero)/immI0
// rule could never match a ReplicateL node, so a zero long-vector splat had
// no dedicated rule, and it wrongly competed with the 2I rules while
// producing a 128-bit register for a 2-element int vector.
instruct replicate2L_zero(vecX dst, immL0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(2L)" %}
  ins_encode %{
    // EOR of dst with itself yields all-zero bits regardless of the
    // previous contents; no immediate needs to be materialised.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15080 
// Broadcast an FP-register float into both lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15093 
// Broadcast an FP-register float into all 4 lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15106 
// Broadcast an FP-register double into both lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15119 
15120 // ====================REDUCTION ARITHMETIC====================================
15121 
// Add-reduction of a 2I vector into a scalar: extract each lane with umov,
// then accumulate with 32-bit adds.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15140 
// Add-reduction of a 4I vector: addv sums all four lanes in the SIMD unit,
// then one umov + addw folds in the scalar input.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15158 
// Multiply-reduction of a 2I vector.  TEMP dst: dst is written while lane 1
// of src2 has yet to be read, so it must not share a register with inputs.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15177 
// Multiply-reduction of a 4I vector: copy the high 64 bits onto the low
// half (ins D lane 1 -> lane 0), multiply pairwise with mulv T2S so
// tmp = {s0*s2, s1*s3}, then finish with two scalar muls.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15202 
// Add-reduction of a 2F vector.  Scalar fadds are used (not faddp) so the
// additions happen in a fixed order, preserving strict FP semantics; the
// ins moves lane 1 into lane 0 of tmp for the second scalar add.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15222 
// Add-reduction of a 4F vector: lanes 1..3 are each moved into lane 0 of
// tmp (ins) and folded in with ordered scalar fadds.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15254 
// Multiply-reduction of a 2F vector using ordered scalar fmuls; ins moves
// lane 1 into lane 0 of tmp for the second multiply.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15274 
// Multiply-reduction of a 4F vector: lanes 1..3 are each moved into lane 0
// of tmp (ins) and folded in with ordered scalar fmuls.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15306 
// Add-reduction of a 2D vector using ordered scalar faddd; ins moves
// D lane 1 into lane 0 of tmp for the second add.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15326 
// Multiply-reduction of a 2D vector using ordered scalar fmuld; ins moves
// D lane 1 into lane 0 of tmp for the second multiply.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15346 
15347 // ====================VECTOR ARITHMETIC=======================================
15348 
15349 // --------------------------------- ADD --------------------------------------
15350 
// Vector byte add, 64-bit form (covers lengths 4 and 8).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15365 
// Vector byte add, 128-bit form (16 lanes).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15379 
// Vector short add, 64-bit form (covers lengths 2 and 4).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15394 
// Vector short add, 128-bit form (8 lanes).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15408 
// Vector int add, 64-bit form (2 lanes).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15422 
// Vector int add, 128-bit form (4 lanes).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15436 
// Vector long add, 128-bit form (2 lanes).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15450 
// Vector float add, 64-bit form (2 lanes).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15464 
// Vector float add, 128-bit form (4 lanes).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15478 
// Vector double add, 128-bit form (2 lanes).  Added the length-2 predicate
// that every other 128-bit rule in this family carries (vadd2L, vadd4F,
// vsub2I, ...); without it the rule matched any AddVD regardless of the
// ideal vector length.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15491 
15492 // --------------------------------- SUB --------------------------------------
15493 
// Vector byte subtract, 64-bit form (covers lengths 4 and 8).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15508 
15509 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15510 %{
15511   predicate(n->as_Vector()->length() == 16);
15512   match(Set dst (SubVB src1 src2));
15513   ins_cost(INSN_COST);
15514   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15515   ins_encode %{
15516     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15517             as_FloatRegister($src1$$reg),
15518             as_FloatRegister($src2$$reg));
15519   %}
15520   ins_pipe(vdop128);
15521 %}
15522 
15523 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15524 %{
15525   predicate(n->as_Vector()->length() == 2 ||
15526             n->as_Vector()->length() == 4);
15527   match(Set dst (SubVS src1 src2));
15528   ins_cost(INSN_COST);
15529   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15530   ins_encode %{
15531     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15532             as_FloatRegister($src1$$reg),
15533             as_FloatRegister($src2$$reg));
15534   %}
15535   ins_pipe(vdop64);
15536 %}
15537 
15538 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15539 %{
15540   predicate(n->as_Vector()->length() == 8);
15541   match(Set dst (SubVS src1 src2));
15542   ins_cost(INSN_COST);
15543   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15544   ins_encode %{
15545     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15546             as_FloatRegister($src1$$reg),
15547             as_FloatRegister($src2$$reg));
15548   %}
15549   ins_pipe(vdop128);
15550 %}
15551 
15552 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15553 %{
15554   predicate(n->as_Vector()->length() == 2);
15555   match(Set dst (SubVI src1 src2));
15556   ins_cost(INSN_COST);
15557   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15558   ins_encode %{
15559     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15560             as_FloatRegister($src1$$reg),
15561             as_FloatRegister($src2$$reg));
15562   %}
15563   ins_pipe(vdop64);
15564 %}
15565 
15566 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15567 %{
15568   predicate(n->as_Vector()->length() == 4);
15569   match(Set dst (SubVI src1 src2));
15570   ins_cost(INSN_COST);
15571   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15572   ins_encode %{
15573     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15574             as_FloatRegister($src1$$reg),
15575             as_FloatRegister($src2$$reg));
15576   %}
15577   ins_pipe(vdop128);
15578 %}
15579 
15580 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15581 %{
15582   predicate(n->as_Vector()->length() == 2);
15583   match(Set dst (SubVL src1 src2));
15584   ins_cost(INSN_COST);
15585   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15586   ins_encode %{
15587     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15588             as_FloatRegister($src1$$reg),
15589             as_FloatRegister($src2$$reg));
15590   %}
15591   ins_pipe(vdop128);
15592 %}
15593 
15594 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15595 %{
15596   predicate(n->as_Vector()->length() == 2);
15597   match(Set dst (SubVF src1 src2));
15598   ins_cost(INSN_COST);
15599   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15600   ins_encode %{
15601     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15602             as_FloatRegister($src1$$reg),
15603             as_FloatRegister($src2$$reg));
15604   %}
15605   ins_pipe(vdop_fp64);
15606 %}
15607 
15608 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15609 %{
15610   predicate(n->as_Vector()->length() == 4);
15611   match(Set dst (SubVF src1 src2));
15612   ins_cost(INSN_COST);
15613   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15614   ins_encode %{
15615     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15616             as_FloatRegister($src1$$reg),
15617             as_FloatRegister($src2$$reg));
15618   %}
15619   ins_pipe(vdop_fp128);
15620 %}
15621 
15622 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15623 %{
15624   predicate(n->as_Vector()->length() == 2);
15625   match(Set dst (SubVD src1 src2));
15626   ins_cost(INSN_COST);
15627   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15628   ins_encode %{
15629     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15630             as_FloatRegister($src1$$reg),
15631             as_FloatRegister($src2$$reg));
15632   %}
15633   ins_pipe(vdop_fp128);
15634 %}
15635 
15636 // --------------------------------- MUL --------------------------------------
15637 
// Integer vector multiply, short lanes, D form (covers 2- and
// 4-element short vectors).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, 8 short lanes, Q form.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Integer vector multiply, 2 int lanes, D form.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, 4 int lanes, Q form.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// FP vector multiply, 2 float lanes, D form.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// FP vector multiply, 4 float lanes, Q form.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// FP vector multiply, 2 double lanes, Q form.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15736 
15737 // --------------------------------- MLA --------------------------------------
15738 
// Integer multiply-accumulate: dst += src1 * src2, short lanes, D form.
// Note the match pattern requires the accumulator to be the same node
// as the AddVS destination (Set dst (AddVS dst ...)).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 8 short lanes, Q form.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, 2 int lanes, D form.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 4 int lanes, Q form.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// Fused FP multiply-add (FMLA), 2 float lanes; only selected when the
// FmaVF node exists, which is gated on UseFMA.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
// Fused FP multiply-add, 4 float lanes, Q form.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
// Fused FP multiply-add, 2 double lanes, Q form.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15837 
15838 // --------------------------------- MLS --------------------------------------
15839 
// Integer multiply-subtract: dst -= src1 * src2, short lanes, D form.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 8 short lanes, Q form.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, 2 int lanes, D form.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 4 int lanes, Q form.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// Fused FP multiply-subtract (FMLS), 2 float lanes. Either multiplicand
// may carry the negation, hence the two match patterns.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// Fused FP multiply-subtract, 4 float lanes, Q form.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// Fused FP multiply-subtract, 2 double lanes, Q form.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15941 
15942 // --------------------------------- DIV --------------------------------------
15943 
// FP vector divide, 2 float lanes, D form.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// FP vector divide, 4 float lanes, Q form.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// FP vector divide, 2 double lanes, Q form.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15985 
15986 // --------------------------------- SQRT -------------------------------------
15987 
// FP vector square root, 2 double lanes, Q form.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
15999 
16000 // --------------------------------- ABS --------------------------------------
16001 
// FP vector absolute value, 2 float lanes, D form.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector absolute value, 4 float lanes, Q form.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP vector absolute value, 2 double lanes, Q form.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16040 
16041 // --------------------------------- NEG --------------------------------------
16042 
// FP vector negate, 2 float lanes, D form.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// FP vector negate, 4 float lanes, Q form.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// FP vector negate, 2 double lanes, Q form.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16081 
16082 // --------------------------------- AND --------------------------------------
16083 
// Bitwise AND, D form. Logical ops are lane-agnostic, so the predicate
// keys on length_in_bytes rather than element count.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND, Q form (16 bytes).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16112 
16113 // --------------------------------- OR ---------------------------------------
16114 
// Bitwise OR, D form (4 or 8 bytes).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // The format string previously said "and", but the instruction
  // emitted below is ORR; fixed to match (cf. vor16B).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16129 
// Bitwise OR, Q form (16 bytes).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16143 
16144 // --------------------------------- XOR --------------------------------------
16145 
// Bitwise XOR, D form (4 or 8 bytes); emits EOR.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR, Q form (16 bytes); emits EOR.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16174 
16175 // ------------------------------ Shift ---------------------------------------
16176 
// Broadcast a left-shift count from a GP register into every byte lane
// of a vector register, for use by the variable-shift rules below.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    // Broadcast the count, then negate every lane so SSHL/USHL shift right.
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16196 
// Variable byte shift, D form. SSHL handles both LShiftVB and RShiftVB:
// right shifts arrive with a negated count (see vshiftcntR above), and
// SSHL with a negative count shifts right arithmetically.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable byte shift (left / arithmetic right), Q form.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical (unsigned) right byte shift, D form; USHL with the
// negated count performs the logical right shift.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right byte shift, Q form.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16252 
// Immediate left byte shift, D form. A shift of >= 8 would zero every
// byte lane; SHL's immediate cannot express that, so emit
// EOR dst,src,src (dst = 0) instead.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left byte shift, Q form; same zeroing trick for sh >= 8.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right byte shift, D form. Shifting right by
// >= 8 is equivalent to shifting by 7 (every result bit is the sign
// bit), so the count is clamped rather than zeroed.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right byte shift, Q form; count clamped to 7.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right byte shift, D form; sh >= 8 zeroes the
// result, emitted as EOR dst,src,src.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right byte shift, Q form.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16359 
// Variable short shift, D form. As with the byte rules, SSHL serves
// both left and arithmetic-right shifts (right-shift counts are
// negated by vshiftcntR).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable short shift (left / arithmetic right), Q form.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical right short shift, D form.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical right short shift, Q form.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16415 
// Immediate left shift of 2 or 4 short lanes. A Java shift count can
// exceed the 16-bit element width; a left shift by >= 16 must produce
// zero, so that case is handled by XOR-ing the register with itself
// (SHL's immediate field cannot encode counts >= element width).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift count >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16435 
// Immediate left shift of 8 short lanes (Q register). Counts >= 16 zero
// the destination via self-XOR, as in vsll4S_imm.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift count >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16454 
// Immediate arithmetic right shift of 2 or 4 short lanes. An arithmetic
// shift by >= 16 is equivalent to a shift by 15 (every bit becomes the
// sign bit), so the count is clamped rather than zeroing the result.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;  // clamp: sign bit fills the whole lane
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16469 
// Immediate arithmetic right shift of 8 short lanes (Q register); the
// count is clamped to 15, as in vsra4S_imm.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;  // clamp: sign bit fills the whole lane
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16483 
// Immediate logical right shift of 2 or 4 short lanes. A logical shift
// by >= 16 must produce zero, handled by self-XOR (USHR cannot encode
// counts >= element width).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift count >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16503 
// Immediate logical right shift of 8 short lanes (Q register); counts
// >= 16 zero the destination via self-XOR, as in vsrl4S_imm.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift count >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16522 
// Variable-count shift of 2 int (32-bit) lanes. SSHL's signed per-lane
// count covers both left (positive) and arithmetic right (negative)
// shifts, so one rule matches LShiftVI and RShiftVI.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16536 
// Variable-count shift of 4 int lanes in a Q register (see vsll2I for
// why both left and arithmetic-right shifts map to SSHL).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16550 
// Variable-count logical right shift of 2 int lanes (USHL with negated
// counts -- see vsrl4S for the convention).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16563 
// Variable-count logical right shift of 4 int lanes in a Q register.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16576 
// Immediate left shift of 2 int lanes. No out-of-range handling here,
// unlike the short-element rules: presumably the ideal-graph shift count
// is already masked to < 32 for int elements -- confirm against C2's
// shift-count normalization.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
16589 
// Immediate left shift of 4 int lanes in a Q register (see vsll2I_imm
// regarding the absence of an out-of-range clamp).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16602 
// Immediate arithmetic right shift of 2 int lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
16615 
// Immediate arithmetic right shift of 4 int lanes in a Q register.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16628 
// Immediate logical right shift of 2 int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
16641 
// Immediate logical right shift of 4 int lanes in a Q register.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16654 
// Variable-count shift of 2 long (64-bit) lanes. SSHL's signed per-lane
// count covers both left (positive) and arithmetic right (negative)
// shifts, so one rule matches LShiftVL and RShiftVL.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16668 
// Variable-count logical right shift of 2 long lanes (USHL with negated
// counts -- see vsrl4S for the convention).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16681 
// Immediate left shift of 2 long lanes. No out-of-range clamp:
// presumably the ideal-graph shift count is already masked to < 64 for
// long elements -- confirm against C2's shift-count normalization.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16694 
// Immediate arithmetic right shift of 2 long lanes.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16707 
// Immediate logical right shift of 2 long lanes.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
16720 
16721 //----------PEEPHOLE RULES-----------------------------------------------------
16722 // These must follow all instruction definitions as they use the names
16723 // defined in the instructions definitions.
16724 //
16725 // peepmatch ( root_instr_name [preceding_instruction]* );
16726 //
16727 // peepconstraint %{
16728 // (instruction_number.operand_name relational_op instruction_number.operand_name
16729 //  [, ...] );
16730 // // instruction numbers are zero-based using left to right order in peepmatch
16731 //
16732 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16733 // // provide an instruction_number.operand_name for each operand that appears
16734 // // in the replacement instruction's match rule
16735 //
16736 // ---------VM FLAGS---------------------------------------------------------
16737 //
16738 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16739 //
16740 // Each peephole rule is given an identifying number starting with zero and
16741 // increasing by one in the order seen by the parser.  An individual peephole
16742 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16743 // on the command-line.
16744 //
16745 // ---------CURRENT LIMITATIONS----------------------------------------------
16746 //
16747 // Only match adjacent instructions in same basic block
16748 // Only equality constraints
16749 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16750 // Only one replacement instruction
16751 //
16752 // ---------EXAMPLE----------------------------------------------------------
16753 //
16754 // // pertinent parts of existing instructions in architecture description
16755 // instruct movI(iRegINoSp dst, iRegI src)
16756 // %{
16757 //   match(Set dst (CopyI src));
16758 // %}
16759 //
16760 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16761 // %{
16762 //   match(Set dst (AddI dst src));
16763 //   effect(KILL cr);
16764 // %}
16765 //
16766 // // Change (inc mov) to lea
16767 // peephole %{
//   // increment preceded by register-register move
16769 //   peepmatch ( incI_iReg movI );
16770 //   // require that the destination register of the increment
16771 //   // match the destination register of the move
16772 //   peepconstraint ( 0.dst == 1.dst );
16773 //   // construct a replacement instruction that sets
16774 //   // the destination to ( move's source register + one )
16775 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16776 // %}
16777 //
16778 
16779 // Implementation no longer uses movX instructions since
16780 // machine-independent system no longer uses CopyX nodes.
16781 //
16782 // peephole
16783 // %{
16784 //   peepmatch (incI_iReg movI);
16785 //   peepconstraint (0.dst == 1.dst);
16786 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16787 // %}
16788 
16789 // peephole
16790 // %{
16791 //   peepmatch (decI_iReg movI);
16792 //   peepconstraint (0.dst == 1.dst);
16793 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16794 // %}
16795 
16796 // peephole
16797 // %{
16798 //   peepmatch (addI_iReg_imm movI);
16799 //   peepconstraint (0.dst == 1.dst);
16800 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16801 // %}
16802 
16803 // peephole
16804 // %{
16805 //   peepmatch (incL_iReg movL);
16806 //   peepconstraint (0.dst == 1.dst);
16807 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16808 // %}
16809 
16810 // peephole
16811 // %{
16812 //   peepmatch (decL_iReg movL);
16813 //   peepconstraint (0.dst == 1.dst);
16814 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16815 // %}
16816 
16817 // peephole
16818 // %{
16819 //   peepmatch (addL_iReg_imm movL);
16820 //   peepconstraint (0.dst == 1.dst);
16821 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16822 // %}
16823 
16824 // peephole
16825 // %{
16826 //   peepmatch (addP_iReg_imm movP);
16827 //   peepconstraint (0.dst == 1.dst);
16828 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16829 // %}
16830 
16831 // // Change load of spilled value to only a spill
16832 // instruct storeI(memory mem, iRegI src)
16833 // %{
16834 //   match(Set mem (StoreI mem src));
16835 // %}
16836 //
16837 // instruct loadI(iRegINoSp dst, memory mem)
16838 // %{
16839 //   match(Set dst (LoadI mem));
16840 // %}
16841 //
16842 
16843 //----------SMARTSPILL RULES---------------------------------------------------
16844 // These must follow all instruction definitions as they use the names
16845 // defined in the instructions definitions.
16846 
16847 // Local Variables:
16848 // mode: c++
16849 // End: