1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Each 64-bit general register is described as a real lower 32-bit half
// (Rn) plus a virtual upper half (Rn_H); see the note above.  r8 and r9
// are deliberately not defined here, which keeps them invisible to the
// register allocator so they can be used as scratch registers.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: save-on-call for Java use, but save-on-entry (SOE) under
// the platform C calling convention.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are reserved system registers (see chunk0 below: listed as
// non-allocatable).
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call
// (whereas the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register is described by four adjacent 32-bit slices:
  // Vn (slice 0), Vn_H (slice 1, via next()), Vn_J (slice 2, via
  // next(2)) and Vn_K (slice 3, via next(3)).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are declared SOC here even though the platform ABI treats
  // them as callee save; see the comment above.
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Pseudo-register for the condition flags: ideal type 0, encoding 32,
// with no backing VMReg (VMRegImpl::Bad()) since the flags are not an
// addressable register.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the general registers: plain volatiles first,
// then the Java argument registers, then the (Java-SOC) non-volatiles,
// and finally the non-allocatable system registers.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the FP/SIMD registers: the no-save registers
// v16-v31 first, then the argument registers v0-v7, then v8-v15
// (callee save under the platform ABI) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The flags pseudo-register lives in its own allocation chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
// (r8 and r9 are absent because they are never defined to the
// allocator at all; see the note at the top of this section.)
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton register classes: each names exactly one 32-bit register,
// for instructions that require a specific fixed register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers
// Variant that also excludes R29 (fp); paired with
// no_special_reg32_with_fp via the reg_class_dynamic below.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// As no_special_reg32_no_fp, but additionally allows R29 (fp) to be
// allocated as an ordinary integer register.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
// Dynamically selects between the two classes above based on
// PreserveFramePointer: r29 is only allocatable when the frame pointer
// does not need to be preserved.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers
// Variant that also excludes R29 (fp); paired with
// no_special_reg_with_fp via the reg_class_dynamic below.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// As no_special_reg_no_fp, but additionally allows R29 (fp) to be
// allocated as an ordinary long register.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
// Dynamically selects between the two classes above based on
// PreserveFramePointer: r29 is only allocatable when the frame pointer
// does not need to be preserved.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton classes for individual 64-bit registers (low word plus
// virtual high half), for instructions and calling-convention slots
// that require a specific fixed register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers
// (includes every general register, even the system registers and sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers
// (as ptr_reg, minus the reserved system registers listed below)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers
// (only the first 32-bit slice Vn of each vector register; see the
// note above about using only the first element)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
// (Vn + Vn_H = the low 64 bits of each 128-bit vector register)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers
// (Vn + Vn_H = the low 64 bits of each 128-bit vector register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
 904 // Class for all 128bit vector registers
 905 reg_class vectorx_reg(
 906     V0, V0_H, V0_J, V0_K,
 907     V1, V1_H, V1_J, V1_K,
 908     V2, V2_H, V2_J, V2_K,
 909     V3, V3_H, V3_J, V3_K,
 910     V4, V4_H, V4_J, V4_K,
 911     V5, V5_H, V5_J, V5_K,
 912     V6, V6_H, V6_J, V6_K,
 913     V7, V7_H, V7_J, V7_K,
 914     V8, V8_H, V8_J, V8_K,
 915     V9, V9_H, V9_J, V9_K,
 916     V10, V10_H, V10_J, V10_K,
 917     V11, V11_H, V11_J, V11_K,
 918     V12, V12_H, V12_J, V12_K,
 919     V13, V13_H, V13_J, V13_K,
 920     V14, V14_H, V14_J, V14_K,
 921     V15, V15_H, V15_J, V15_K,
 922     V16, V16_H, V16_J, V16_K,
 923     V17, V17_H, V17_J, V17_K,
 924     V18, V18_H, V18_J, V18_K,
 925     V19, V19_H, V19_J, V19_K,
 926     V20, V20_H, V20_J, V20_K,
 927     V21, V21_H, V21_J, V21_K,
 928     V22, V22_H, V22_J, V22_K,
 929     V23, V23_H, V23_J, V23_K,
 930     V24, V24_H, V24_J, V24_K,
 931     V25, V25_H, V25_J, V25_K,
 932     V26, V26_H, V26_J, V26_K,
 933     V27, V27_H, V27_J, V27_K,
 934     V28, V28_H, V28_J, V28_K,
 935     V29, V29_H, V29_J, V29_K,
 936     V30, V30_H, V30_J, V30_K,
 937     V31, V31_H, V31_J, V31_K
 938 );
 939 
 940 // Class for 128 bit register v0
 941 reg_class v0_reg(
 942     V0, V0_H
 943 );
 944 
 945 // Class for 128 bit register v1
 946 reg_class v1_reg(
 947     V1, V1_H
 948 );
 949 
 950 // Class for 128 bit register v2
 951 reg_class v2_reg(
 952     V2, V2_H
 953 );
 954 
 955 // Class for 128 bit register v3
 956 reg_class v3_reg(
 957     V3, V3_H
 958 );
 959 
 960 // Singleton class for condition codes
 961 reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are twice the cost of a register move.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references carry a heavy penalty.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
// Trampoline stub bookkeeping used by Compile::shorten_branches.
// AArch64 does not use call trampolines, so both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

// Emission and sizing of the exception and deopt handler stubs.
// The emit_* methods are implemented in the source block below.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // the handler is the size of a single far branch
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};

// returns true for opcodes translated as exclusive load/store pairs;
// when maybe_volatile is true the weak/exchange CAS forms also qualify
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
1064   // Optimizaton of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
  //   sequences which i) occur as a translation of volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  //   alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false. When maybe_volatile is true the
  // CompareAndExchange and weak CompareAndSwap forms are also
  // accepted; when false only the strong forms count.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // these only count as CAS when volatile semantics may apply
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
      return maybe_volatile;
    default:
      return false;
    }
  }

  // helper to determine the maximum number of Phi nodes we may need to
  // traverse when searching from a card mark membar for the merge mem
  // feeding a trailing membar or vice versa
  // NOTE(review): the helper described above does not appear in this
  // section of the file -- possibly a stale comment; confirm upstream.

// predicates controlling emit of ldr<x>/ldar<x> and associated dmb

// Returns true when the given MemBarAcquire can be elided because the
// associated load or CAS will itself be emitted with acquire
// semantics (ldar<x> / ldaxr<x>).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // trailing membar of a volatile load: the ldar<x> provides acquire
  if (mb->trailing_load()) {
    return true;
  }

  // trailing membar of a load/store: elide only for opcodes that will
  // be emitted via an acquiring exclusive load (see is_CAS)
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1331 
1332 bool needs_acquiring_load(const Node *n)
1333 {
1334   assert(n->is_Load(), "expecting a load");
1335   if (UseBarriersForVolatile) {
1336     // we use a normal load and a dmb
1337     return false;
1338   }
1339 
1340   LoadNode *ld = n->as_Load();
1341 
1342   return ld->is_acquire();
1343 }
1344 
1345 bool unnecessary_release(const Node *n)
1346 {
1347   assert((n->is_MemBar() &&
1348           n->Opcode() == Op_MemBarRelease),
1349          "expecting a release membar");
1350 
1351   if (UseBarriersForVolatile) {
1352     // we need to plant a dmb
1353     return false;
1354   }
1355 
1356   MemBarNode *barrier = n->as_MemBar();
1357   if (!barrier->leading()) {
1358     return false;
1359   } else {
1360     Node* trailing = barrier->trailing_membar();
1361     MemBarNode* trailing_mb = trailing->as_MemBar();
1362     assert(trailing_mb->trailing(), "Not a trailing membar?");
1363     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1364 
1365     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1366     if (mem->is_Store()) {
1367       assert(mem->as_Store()->is_release(), "");
1368       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1369       return true;
1370     } else {
1371       assert(mem->is_LoadStore(), "");
1372       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1373       return is_CAS(mem->Opcode(), true);
1374     }
1375   }
1376   return false;
1377 }
1378 
// Returns true when the trailing MemBarVolatile of a volatile store
// can be elided because the store will be emitted as stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // elide only when this is the trailing membar of a volatile store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // cross-check the leading/trailing membar linkage
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1402 
1403 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1404 
1405 bool needs_releasing_store(const Node *n)
1406 {
1407   // assert n->is_Store();
1408   if (UseBarriersForVolatile) {
1409     // we use a normal store and dmb combination
1410     return false;
1411   }
1412 
1413   StoreNode *st = n->as_Store();
1414 
1415   return st->trailing_membar() != NULL;
1416 }
1417 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

// Decides whether the CAS macro-sequence should use ldaxr<x> instead
// of a leading dmb followed by ldxr<x>.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // strong CAS forms always sit between leading/trailing membars
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // weak/exchange forms only need an acquiring load when they are
    // part of a recognised volatile shape, i.e. a trailing membar exists
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1439 
// predicate controlling translation of StoreCM
//
// returns true if a StoreStore barrier is unnecessary before the card
// write, otherwise false (i.e. a dmb ishst must precede the card mark)
1444 
1445 bool unnecessary_storestore(const Node *storecm)
1446 {
1447   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1448 
1449   // we need to generate a dmb ishst between an object put and the
1450   // associated card mark when we are using CMS without conditional
1451   // card marking
1452 
1453   if (UseConcMarkSweepGC && !UseCondCardMark) {
1454     return false;
1455   }
1456 
1457   // a storestore is unnecesary in all other cases
1458 
1459   return true;
1460 }
1461 
1462 
1463 #define __ _masm.
1464 
// forward declarations for helper functions to convert register
// indices to register objects
1467 
1468 // the ad file has to provide implementations of certain methods
1469 // expected by the generic code
1470 //
1471 // REQUIRED FUNCTIONALITY
1472 
1473 //=============================================================================
1474 
// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.

int MachCallStaticJavaNode::ret_addr_offset()
{
  // call should be a simple bl
  int off = 4;
  return off;
}

int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}

int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr))
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blrt rscratch1
  // a target inside the code cache is reached with a far call;
  // anything else needs the full six instruction sequence
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}

// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1521 
1522 //=============================================================================
1523 
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a brk instruction (software breakpoint)
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // computed generically from the emitted code
  return MachNode::size(ra_);
}
1538 
1539 //=============================================================================
1540 
#ifndef PRODUCT
  // NOTE(review): _count appears to be a count of nop instructions
  // (see size() below), so the "%d bytes" label here may under-report
  // by a factor of the instruction size -- confirm intent.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1557 
1558 //=============================================================================
// the constant base node produces no value, so it needs no output register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called because requires_postalloc_expand() returns false
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // matches the empty encoding in emit() above
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1583 
#ifndef PRODUCT
// print an assembly-like rendering of the prolog; keep in sync with
// MachPrologNode::emit below
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames can be built with an immediate sub; larger ones need
  // the frame size in a scratch register
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif

// build the frame, performing the stack bang and simulator/constant
// table bookkeeping as required
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // no relocatable values in the prolog
  return 0;
}
1652 
1653 //=============================================================================
1654 
1655 #ifndef PRODUCT
1656 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1657   Compile* C = ra_->C;
1658   int framesize = C->frame_slots() << LogBytesPerInt;
1659 
1660   st->print("# pop frame %d\n\t",framesize);
1661 
1662   if (framesize == 0) {
1663     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1664   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
1665     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
1666     st->print("add  sp, sp, #%d\n\t", framesize);
1667   } else {
1668     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
1669     st->print("add  sp, sp, rscratch1\n\t");
1670     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
1671   }
1672 
1673   if (do_polling() && C->is_method_compilation()) {
1674     st->print("# touch polling page\n\t");
1675     st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
1676     st->print("ldr zr, [rscratch1]");
1677   }
1678 }
1679 #endif
1680 
1681 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1682   Compile* C = ra_->C;
1683   MacroAssembler _masm(&cbuf);
1684   int framesize = C->frame_slots() << LogBytesPerInt;
1685 
1686   __ remove_frame(framesize);
1687 
1688   if (NotifySimulator) {
1689     __ notify(Assembler::method_reentry);
1690   }
1691 
1692   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
1693     __ reserved_stack_check();
1694   }
1695 
1696   if (do_polling() && C->is_method_compilation()) {
1697     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
1698   }
1699 }
1700 
1701 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1702   // Variable size. Determine dynamically.
1703   return MachNode::size(ra_);
1704 }
1705 
1706 int MachEpilogNode::reloc() const {
1707   // Return number of relocatable values contained in this instruction.
1708   return 1; // 1 for polling page.
1709 }
1710 
1711 const Pipeline * MachEpilogNode::pipeline() const {
1712   return MachNode::pipeline_class();
1713 }
1714 
1715 // This method seems to be obsolete. It is declared in machnode.hpp
1716 // and defined in all *.ad files, but it is never called. Should we
1717 // get rid of it?
1718 int MachEpilogNode::safepoint_offset() const {
1719   assert(do_polling(), "no return for this epilog node");
1720   return 4;
1721 }
1722 
1723 //=============================================================================
1724 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register number to its coarse class. The numeric
// boundaries below must agree with the register definitions earlier
// in this file.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots each
  // (cf. vectorx_reg above: Vn .. Vn_K is 4 allocator slots, 32*4=128)
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1752 
1753 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1754   Compile* C = ra_->C;
1755 
1756   // Get registers to move.
1757   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1758   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1759   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1760   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1761 
1762   enum RC src_hi_rc = rc_class(src_hi);
1763   enum RC src_lo_rc = rc_class(src_lo);
1764   enum RC dst_hi_rc = rc_class(dst_hi);
1765   enum RC dst_lo_rc = rc_class(dst_lo);
1766 
1767   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1768 
1769   if (src_hi != OptoReg::Bad) {
1770     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1771            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1772            "expected aligned-adjacent pairs");
1773   }
1774 
1775   if (src_lo == dst_lo && src_hi == dst_hi) {
1776     return 0;            // Self copy, no move.
1777   }
1778 
1779   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1780               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1781   int src_offset = ra_->reg2offset(src_lo);
1782   int dst_offset = ra_->reg2offset(dst_lo);
1783 
1784   if (bottom_type()->isa_vect() != NULL) {
1785     uint ireg = ideal_reg();
1786     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1787     if (cbuf) {
1788       MacroAssembler _masm(cbuf);
1789       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1790       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1791         // stack->stack
1792         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1793         if (ireg == Op_VecD) {
1794           __ unspill(rscratch1, true, src_offset);
1795           __ spill(rscratch1, true, dst_offset);
1796         } else {
1797           __ spill_copy128(src_offset, dst_offset);
1798         }
1799       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1800         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1801                ireg == Op_VecD ? __ T8B : __ T16B,
1802                as_FloatRegister(Matcher::_regEncode[src_lo]));
1803       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1804         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1805                        ireg == Op_VecD ? __ D : __ Q,
1806                        ra_->reg2offset(dst_lo));
1807       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1808         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1809                        ireg == Op_VecD ? __ D : __ Q,
1810                        ra_->reg2offset(src_lo));
1811       } else {
1812         ShouldNotReachHere();
1813       }
1814     }
1815   } else if (cbuf) {
1816     MacroAssembler _masm(cbuf);
1817     switch (src_lo_rc) {
1818     case rc_int:
1819       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1820         if (is64) {
1821             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1822                    as_Register(Matcher::_regEncode[src_lo]));
1823         } else {
1824             MacroAssembler _masm(cbuf);
1825             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1826                     as_Register(Matcher::_regEncode[src_lo]));
1827         }
1828       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1829         if (is64) {
1830             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1831                      as_Register(Matcher::_regEncode[src_lo]));
1832         } else {
1833             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1834                      as_Register(Matcher::_regEncode[src_lo]));
1835         }
1836       } else {                    // gpr --> stack spill
1837         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1838         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1839       }
1840       break;
1841     case rc_float:
1842       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1843         if (is64) {
1844             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1845                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1846         } else {
1847             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1848                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1849         }
1850       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1851           if (cbuf) {
1852             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1853                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1854         } else {
1855             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1856                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1857         }
1858       } else {                    // fpr --> stack spill
1859         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1860         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1861                  is64 ? __ D : __ S, dst_offset);
1862       }
1863       break;
1864     case rc_stack:
1865       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1866         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1867       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1868         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1869                    is64 ? __ D : __ S, src_offset);
1870       } else {                    // stack --> stack copy
1871         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1872         __ unspill(rscratch1, is64, src_offset);
1873         __ spill(rscratch1, is64, dst_offset);
1874       }
1875       break;
1876     default:
1877       assert(false, "bad rc_class for spill");
1878       ShouldNotReachHere();
1879     }
1880   }
1881 
1882   if (st) {
1883     st->print("spill ");
1884     if (src_lo_rc == rc_stack) {
1885       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1886     } else {
1887       st->print("%s -> ", Matcher::regName[src_lo]);
1888     }
1889     if (dst_lo_rc == rc_stack) {
1890       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1891     } else {
1892       st->print("%s", Matcher::regName[dst_lo]);
1893     }
1894     if (bottom_type()->isa_vect() != NULL) {
1895       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1896     } else {
1897       st->print("\t# spill size = %d", is64 ? 64:32);
1898     }
1899   }
1900 
1901   return 0;
1902 
1903 }
1904 
1905 #ifndef PRODUCT
1906 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1907   if (!ra_)
1908     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1909   else
1910     implementation(NULL, ra_, false, st);
1911 }
1912 #endif
1913 
// Emit the spill/copy instructions into the code buffer (no listing output).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1917 
// Size in bytes of the emitted spill copy, computed generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1921 
1922 //=============================================================================
1923 
1924 #ifndef PRODUCT
1925 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1926   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1927   int reg = ra_->get_reg_first(this);
1928   st->print("add %s, rsp, #%d]\t# box lock",
1929             Matcher::regName[reg], offset);
1930 }
1931 #endif
1932 
// Materialize the address of this lock's stack slot into the node's
// register: add reg, sp, #offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  // Stack-slot offset of the box and encoding of the destination register.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // Offsets too large for an add immediate are not expected here;
    // size() below assumes a single 4-byte instruction.
    ShouldNotReachHere();
  }
}
1945 
// Fixed size: emit() above produces exactly one 4-byte add instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
1950 
1951 //=============================================================================
1952 
#ifndef PRODUCT
// Describe the unverified entry point: load the receiver's klass,
// compare against the inline-cache klass, and branch to the ic-miss
// stub on mismatch (mirrors emit() below).
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
1969 
// Emit the unverified entry point: compare the receiver's klass (from
// j_rarg0) with the inline-cache klass in rscratch2 and jump to the
// ic-miss stub when they differ.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1983 
// Size in bytes of the unverified entry point, computed generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1988 
1989 // REQUIRED EMIT CODE
1990 
1991 //=============================================================================
1992 
1993 // Emit exception handler code.
1994 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
1995 {
1996   // mov rscratch1 #exception_blob_entry_point
1997   // br rscratch1
1998   // Note that the code buffer's insts_mark is always relative to insts.
1999   // That's why we must use the macroassembler to generate a handler.
2000   MacroAssembler _masm(&cbuf);
2001   address base = __ start_a_stub(size_exception_handler());
2002   if (base == NULL) {
2003     ciEnv::current()->record_failure("CodeCache is full");
2004     return 0;  // CodeBuffer::expand failed
2005   }
2006   int offset = __ offset();
2007   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
2008   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
2009   __ end_a_stub();
2010   return offset;
2011 }
2012 
2013 // Emit deopt handler code.
2014 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
2015 {
2016   // Note that the code buffer's insts_mark is always relative to insts.
2017   // That's why we must use the macroassembler to generate a handler.
2018   MacroAssembler _masm(&cbuf);
2019   address base = __ start_a_stub(size_deopt_handler());
2020   if (base == NULL) {
2021     ciEnv::current()->record_failure("CodeCache is full");
2022     return 0;  // CodeBuffer::expand failed
2023   }
2024   int offset = __ offset();
2025 
2026   __ adr(lr, __ pc());
2027   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
2028 
2029   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
2030   __ end_a_stub();
2031   return offset;
2032 }
2033 
2034 // REQUIRED MATCHER CODE
2035 
2036 //=============================================================================
2037 
2038 const bool Matcher::match_rule_supported(int opcode) {
2039 
2040   switch (opcode) {
2041   default:
2042     break;
2043   }
2044 
2045   if (!has_match_rule(opcode)) {
2046     return false;
2047   }
2048 
2049   return true;  // Per default match rules are supported.
2050 }
2051 
2052 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2053 
2054   // TODO
2055   // identify extra cases that we might want to provide match rules for
2056   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2057   bool ret_value = match_rule_supported(opcode);
2058   // Add rules here.
2059 
2060   return ret_value;  // Per default match rules are supported.
2061 }
2062 
// No predicated (masked) vector support on this platform.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2066 
// Float register pressure threshold; the shared default is used unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2070 
// Not used on this platform; aborts if ever called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2076 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Accept anything representable as a signed 16-bit byte offset.
  return (-32768 <= offset && offset < 32768);
}
2086 
// Any 64-bit constant is considered cheap to materialize here.
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2092 
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2097 
2098 // Vector width in bytes.
2099 const int Matcher::vector_width_in_bytes(BasicType bt) {
2100   int size = MIN2(16,(int)MaxVectorSize);
2101   // Minimum 2 values in vector
2102   if (size < 2*type2aelembytes(bt)) size = 0;
2103   // But never < 4
2104   if (size < 4) size = 0;
2105   return size;
2106 }
2107 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2112 const int Matcher::min_vector_size(const BasicType bt) {
2113 //  For the moment limit the vector size to 8 bytes
2114     int size = 8 / type2aelembytes(bt);
2115     if (size < 2) size = 2;
2116     return size;
2117 }
2118 
2119 // Vector ideal reg.
2120 const uint Matcher::vector_ideal_reg(int len) {
2121   switch(len) {
2122     case  8: return Op_VecD;
2123     case 16: return Op_VecX;
2124   }
2125   ShouldNotReachHere();
2126   return 0;
2127 }
2128 
2129 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2130   switch(size) {
2131     case  8: return Op_VecD;
2132     case 16: return Op_VecX;
2133   }
2134   ShouldNotReachHere();
2135   return 0;
2136 }
2137 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2142 
// Misaligned vector loads/stores are allowed unless -XX:+AlignVector
// forbids them.  (The comment previously said "x86" — an x86.ad leftover.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2147 
// false => the array-init count is not in bytes; shared code scales it
// to BytesPerLong, which is fine here.
const bool Matcher::init_array_count_is_in_bytes = false;
2150 
// Use conditional move (CMOVL): extra cost of a long cmove over an int one.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
2156 
// Extra cost of a float cmove over an int one.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2161 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
2168 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Folding the decode into the address only works when compressed
  // oops are unshifted.
  return Universe::narrow_oop_shift() == 0;
}
2182 
// Same question as above, for narrow klass decodes; currently disabled.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2188 
// Should constant oops be materialized as narrow constant + decode?
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}
2193 
// Should constant klasses be materialized as narrow constant + decode?
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2198 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.  Here float constants are loaded, not copied.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
2211 
// Platform hook for implicit null-check fixup; never expected to be
// called on AArch64 (the old "No-op on amd64" comment was an x86 leftover).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2216 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2220 
// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }
2224 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2230 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments arrive in r0-r7 and v0-v7 (both 32-bit halves of each).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
2256 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2261 
// No hand-written assembler fast path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2265 
// Register for DIVI projection of divmodI; divmodI is not used here.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2270 
// Register for MODI projection of divmodI; divmodI is not used here.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2276 
// Register for DIVL projection of divmodL; divmodL is not used here.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2282 
// Register for MODL projection of divmodL; divmodL is not used here.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2288 
// Register mask for the SP save slot around method-handle invokes: FP.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2292 
2293 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2294   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2295     Node* u = addp->fast_out(i);
2296     if (u->is_Mem()) {
2297       int opsize = u->as_Mem()->memory_size();
2298       assert(opsize > 0, "unexpected memory operand size");
2299       if (u->as_Mem()->memory_size() != (1<<shift)) {
2300         return false;
2301       }
2302     }
2303   }
2304   return true;
2305 }
2306 
// ConvI2L nodes do not need an explicit type attached on this platform.
const bool Matcher::convi2l_type_required = false;
2308 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
//
// Returns true when the AddP's inputs have been pushed for cloning into
// each address user; false to let the expression compute into a register.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // First try the common base+offset shape handled by shared code.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL (ConvI2L x) con) — a scaled index.  Only
  // clone when every memory user accesses 1<<con bytes, so the scale can
  // be folded into each access.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // Subsume the ConvI2L too when it is only used for addressing.
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) — sign-extended index, no scale.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2349 
// No platform-specific address reshaping is needed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2352 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count the float/double vs other parameters.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so float/double args are counted
      // in gps as well.  If that is intentional per the simulator's
      // blrt contract it deserves a comment; otherwise a break is
      // missing — confirm against the simulator before changing.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type for the simulator.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2394 
// Emit a volatile load/store (INSN) of REG at a bare [BASE] address;
// volatile accesses permit no index, scale or displacement, which the
// guarantees below enforce.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore helpers below:
// scalar GPR, scalar FPR and SIMD-variant memory instructions.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2408 
2409   // Used for all non-volatile memory accesses.  The use of
2410   // $mem->opcode() to discover whether this pattern uses sign-extended
2411   // offsets is something of a kludge.
2412   static void loadStore(MacroAssembler masm, mem_insn insn,
2413                          Register reg, int opcode,
2414                          Register base, int index, int size, int disp)
2415   {
2416     Address::extend scale;
2417 
2418     // Hooboy, this is fugly.  We need a way to communicate to the
2419     // encoder that the index needs to be sign extended, so we have to
2420     // enumerate all the cases.
2421     switch (opcode) {
2422     case INDINDEXSCALEDI2L:
2423     case INDINDEXSCALEDI2LN:
2424     case INDINDEXI2L:
2425     case INDINDEXI2LN:
2426       scale = Address::sxtw(size);
2427       break;
2428     default:
2429       scale = Address::lsl(size);
2430     }
2431 
2432     if (index == -1) {
2433       (masm.*insn)(reg, Address(base, disp));
2434     } else {
2435       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2436       (masm.*insn)(reg, Address(base, as_Register(index), scale));
2437     }
2438   }
2439 
2440   static void loadStore(MacroAssembler masm, mem_float_insn insn,
2441                          FloatRegister reg, int opcode,
2442                          Register base, int index, int size, int disp)
2443   {
2444     Address::extend scale;
2445 
2446     switch (opcode) {
2447     case INDINDEXSCALEDI2L:
2448     case INDINDEXSCALEDI2LN:
2449       scale = Address::sxtw(size);
2450       break;
2451     default:
2452       scale = Address::lsl(size);
2453     }
2454 
2455      if (index == -1) {
2456       (masm.*insn)(reg, Address(base, disp));
2457     } else {
2458       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2459       (masm.*insn)(reg, Address(base, as_Register(index), scale));
2460     }
2461   }
2462 
2463   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2464                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2465                          int opcode, Register base, int index, int size, int disp)
2466   {
2467     if (index == -1) {
2468       (masm.*insn)(reg, T, Address(base, disp));
2469     } else {
2470       assert(disp == 0, "unsupported address mode");
2471       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2472     }
2473   }
2474 
2475 %}
2476 
2477 
2478 
2479 //----------ENCODING BLOCK-----------------------------------------------------
2480 // This block specifies the encoding classes used by the compiler to
2481 // output byte streams.  Encoding classes are parameterized macros
2482 // used by Machine Instruction Nodes in order to generate the bit
2483 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
2487 // which returns its register number when queried.  CONST_INTER causes
2488 // an operand to generate a function which returns the value of the
2489 // constant when queried.  MEMORY_INTER causes an operand to generate
2490 // four functions which return the Base Register, the Index Register,
2491 // the Scale Value, and the Offset Value of the operand when queried.
2492 // COND_INTER causes an operand to generate six functions which return
2493 // the encoding code (ie - encoding bits for the instruction)
2494 // associated with each basic boolean condition for a conditional
2495 // instruction.
2496 //
2497 // Instructions specify two basic values for encoding.  Again, a
2498 // function is available to check if the constant displacement is an
2499 // oop. They use the ins_encode keyword to specify their encoding
2500 // classes (which must be a sequence of enc_class names, and their
2501 // parameters, specified in the encoding block), and they use the
2502 // opcode keyword to specify, in order, their primary, secondary, and
2503 // tertiary opcode.  Only the opcode sections which a particular
2504 // instruction needs for encoding need to be specified.
2505 encode %{
2506   // Build emit functions for each basic byte or larger field in the
2507   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2508   // from C++ code in the enc_class source block.  Emit functions will
2509   // live in the main source block for now.  In future, we can
2510   // generalize this by adding a syntax that specifies the sizes of
2511   // fields in an order, so that the adlc can build the emit functions
2512   // automagically
2513 
  // catch all for unimplemented encodings: routes to
  // MacroAssembler::unimplemented so unfinished rules fail loudly.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2519 
  // BEGIN Non-volatile memory access
  //
  // Each encoding below delegates to the loadStore helper, passing the
  // memory operand's opcode so the helper can pick the index extension.

  // ldrsbw: sign-extended byte load into a 32-bit register.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsb: sign-extended byte load into a 64-bit register.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: zero-extended byte load, int destination.
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: zero-extended byte load, long destination.
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2545 
  // ldrshw: sign-extended halfword load into a 32-bit register.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsh: sign-extended halfword load into a 64-bit register.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: zero-extended halfword load, int destination.
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: zero-extended halfword load, long destination.
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2569 
  // ldrw: 32-bit word load, int destination.
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: 32-bit word load (zero-extended), long destination.
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsw: sign-extended 32-bit word load into a long register.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldr: 64-bit load into a long register.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2593 
  // Non-volatile FP/SIMD loads.

  // Load 32-bit float.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit double.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: the operand width is passed explicitly as
  // MacroAssembler::S (32-bit), ::D (64-bit) or ::Q (128-bit).
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2623 
  // Non-volatile narrow/word stores.  The *0 variants store the zero
  // register (zr) so no source operand is needed.

  // Store low byte of a 32-bit register.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte.
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte preceded by a StoreStore barrier, so that prior
  // stores become visible before this one.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store low halfword of a 32-bit register.
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero halfword.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit word.
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero word.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2666 
  // Store 64-bit doubleword.  When the source is r31_sp (sp cannot be
  // encoded directly as a store source on AArch64) the value is first
  // copied into rscratch2; this only ever happens when storing sp into
  // the current thread, which the assert enforces.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2680 
  // Store zero doubleword.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit float.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64-bit double.
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: width passed explicitly as MacroAssembler::S/D/Q,
  // mirroring the aarch64_enc_ldrv* loads above.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2716 
  // END Non-volatile memory access

  // volatile loads and stores
  //
  // These use AArch64 release stores (stlr*) / acquire loads (ldar*).
  // MOV_VOLATILE is a macro (defined earlier in this file) that is
  // given the memory operand components, a scratch register for
  // address formation, and the mnemonic to emit -- NOTE(review):
  // exact expansion not visible in this chunk; confirm at its
  // definition.

  // Release-store of the low byte of a 32-bit register.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Release-store of the low halfword of a 32-bit register.
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Release-store of a 32-bit word.
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
2735 
2736 
  // Acquire loads.  AArch64 has no sign-extending acquire loads, so
  // the signed variants emit ldarb/ldarh and then sign-extend the
  // destination explicitly.

  // Acquire-load byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Acquire-load byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Acquire-load byte, zero-extended (int destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Acquire-load byte, zero-extended (long destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Acquire-load halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Acquire-load halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Acquire-load halfword, zero-extended (int destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Acquire-load halfword, zero-extended (long destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Acquire-load 32-bit word (int destination).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Acquire-load 32-bit word, zero-extended (long destination).
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Acquire-load 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Acquire-load a float: ldarw into rscratch1, then move to the FP
  // register (no FP acquire loads on AArch64).
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Acquire-load a double via ldar + fmovd, as above.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2811 
2812   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2813     Register src_reg = as_Register($src$$reg);
2814     // we sometimes get asked to store the stack pointer into the
2815     // current thread -- we cannot do that directly on AArch64
2816     if (src_reg == r31_sp) {
2817         MacroAssembler _masm(&cbuf);
2818       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2819       __ mov(rscratch2, sp);
2820       src_reg = rscratch2;
2821     }
2822     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2823                  rscratch1, stlr);
2824   %}
2825 
  // Release-store a float: move to rscratch2, then stlrw (no FP
  // release stores on AArch64).  The inner braces scope the local
  // _masm -- presumably so it does not clash with one declared inside
  // MOV_VOLATILE; confirm at the macro definition.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Release-store a double via fmovd + stlr, as above.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2845 
  // synchronized read/update encodings

  // Load-acquire-exclusive of a 64-bit doubleword.  ldaxr only takes
  // a bare base register, so any index/displacement is first folded
  // into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
2876 
  // Store-release-exclusive of a 64-bit doubleword, pairing with
  // aarch64_enc_ldaxr above.  stlxr writes its status (0 = success)
  // into rscratch1; the trailing cmpw against zr puts that outcome in
  // the condition flags for the consuming branch.  Address formation
  // uses rscratch2 since rscratch1 holds the status.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
2906 
  // Compare-and-swap encodings: xword (64-bit), word (32-bit),
  // halfword and byte.  The memory operand must be a bare base
  // register (no index/displacement), which the guarantee enforces.
  // These are release-only (acquire = false), strong CAS variants;
  // see MacroAssembler::cmpxchg for the exact result contract
  // (NOTE(review): result delivery -- flags vs register -- is defined
  // there, not visible in this chunk).

  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2938 
2939 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit acquiring CAS.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit acquiring CAS.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // Halfword acquiring CAS.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // Byte acquiring CAS.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2975 
  // auxiliary used for CompareAndSwapX to set result register:
  // materializes the EQ condition flag (set by the CAS above) as 0/1.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}

  // prefetch encodings

  // Prefetch for store (prfm PSTL1KEEP: L1, temporal).  Handles the
  // same base/index/scale/disp operand shapes as the loads above,
  // using rscratch1 when the full address cannot be encoded directly.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3003 
  // mov encodings

  // Move a 32-bit immediate into a register; zero goes through zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a register; zero goes through zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // Move a pointer constant.  NULL and 1 are handled by dedicated
  // encodings below, so they are unreachable here.  Oop and metadata
  // constants are emitted with relocation info; plain addresses below
  // the VM page size are moved directly, larger ones via adrp + add
  // (page-relative addressing).
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3052 
  // Move the NULL pointer constant (zero).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move the pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Materialize the polling page address with adrp and poll_type
  // relocation; the page is assumed page-aligned (offset must be 0).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base (GC write-barrier support).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // Move a narrow (compressed) oop constant; must carry oop
  // relocation info.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Move the narrow-oop NULL constant (zero).
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant; must carry metadata
  // relocation info.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3110 
  // arithmetic encodings

  // Add/subtract a 32-bit immediate.  The instruct's $primary selects
  // the operation (0 = add, 1 = subtract) by negating the constant;
  // the emitted instruction always receives a non-negative immediate.
  // NOTE(review): con == INT_MIN would overflow on negation --
  // presumably excluded by the immIAddSub operand range; confirm.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above.  NOTE(review): the long constant is
  // truncated to int32_t -- presumably safe because immLAddSub only
  // admits values that fit; confirm against the operand definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3140 
3141   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3142     MacroAssembler _masm(&cbuf);
3143    Register dst_reg = as_Register($dst$$reg);
3144    Register src1_reg = as_Register($src1$$reg);
3145    Register src2_reg = as_Register($src2$$reg);
3146     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3147   %}
3148 
3149   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3150     MacroAssembler _masm(&cbuf);
3151    Register dst_reg = as_Register($dst$$reg);
3152    Register src1_reg = as_Register($src1$$reg);
3153    Register src2_reg = as_Register($src2$$reg);
3154     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3155   %}
3156 
3157   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3158     MacroAssembler _masm(&cbuf);
3159    Register dst_reg = as_Register($dst$$reg);
3160    Register src1_reg = as_Register($src1$$reg);
3161    Register src2_reg = as_Register($src2$$reg);
3162     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3163   %}
3164 
3165   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3166     MacroAssembler _masm(&cbuf);
3167    Register dst_reg = as_Register($dst$$reg);
3168    Register src1_reg = as_Register($src1$$reg);
3169    Register src2_reg = as_Register($src2$$reg);
3170     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3171   %}
3172 
  // compare instruction encodings

  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: flags are
  // set via subsw/addsw with zr as the (discarded) destination.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against a general immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit add/sub immediate.  The
  // `val != -val` test detects Long.MIN_VALUE (the one value equal to
  // its own negation), which cannot be negated and is routed through
  // rscratch1 instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against a general immediate via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test (compare against zr).
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3256 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp
  // operand's $cmpcode.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; body identical, the operand type
  // supplies the unsigned condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Slow-path subtype check via check_klass_subtype_slow_path.
  // $primary selects whether to zero the result register on the
  // success (fall-through) path; the miss path skips the zeroing.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3292 
  // Java static call.  A null _method means a call to a runtime
  // wrapper; otherwise a trampoline call is emitted with the proper
  // static/opt-virtual relocation plus a static-call stub for the
  // interpreter.  Either the trampoline or the stub can fail when the
  // code cache is full, in which case the compile is bailed out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Java dynamic (virtual/interface) call via inline-cache call.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call stack-depth verification; not implemented on AArch64,
  // so with VerifyStackAtCalls it traps via call_Unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3337 
3338   enc_class aarch64_enc_java_to_runtime(method meth) %{
3339     MacroAssembler _masm(&cbuf);
3340 
3341     // some calls to generated routines (arraycopy code) are scheduled
3342     // by C2 as runtime calls. if so we can call them using a br (they
3343     // will be in a reachable segment) otherwise we have to use a blrt
3344     // which loads the absolute address into a register.
3345     address entry = (address)$meth$$method;
3346     CodeBlob *cb = CodeCache::find_blob(entry);
3347     if (cb) {
3348       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
3349       if (call == NULL) {
3350         ciEnv::current()->record_failure("CodeCache is full");
3351         return;
3352       }
3353     } else {
3354       int gpcnt;
3355       int fpcnt;
3356       int rtype;
3357       getCallInfo(tf(), gpcnt, fpcnt, rtype);
3358       Label retaddr;
3359       __ adr(rscratch2, retaddr);
3360       __ lea(rscratch1, RuntimeAddress(entry));
3361       // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
3362       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
3363       __ blrt(rscratch1, gpcnt, fpcnt, rtype);
3364       __ bind(retaddr);
3365       __ add(sp, sp, 2 * wordSize);
3366     }
3367   %}
3368 
  // Jump to the rethrow stub to re-dispatch a pending exception.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3373 
  // Plain method return via the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3378 
  // Tail call: transfer control to jump_target without pushing a frame.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3384 
  // Tail jump used for exception forwarding: pass the original return
  // address to the callee and branch to jump_target.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3394 
  // Fast-path monitor enter for C2 FastLock.
  //   object: oop to be locked
  //   box:    on-stack BasicLock for the displaced header
  //   tmp, tmp2: scratch registers
  // On exit the condition flags hold the result: EQ = locked,
  // NE = the caller must take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor.
    // We can use AArch64's bit test and branch here but
    // markOopDesc does not define a bit index, just the bit value,
    // so assert in case the bit position changes.
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE: single compare-and-swap-acquire/release instruction.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Load-exclusive/store-exclusive retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; we continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      __ cmp(rscratch1, disp_hdr);
    } else {
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3540 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit for C2 FastUnlock, mirroring fast_lock above.
  // On exit the condition flags hold the result: EQ = unlocked,
  // NE = the caller must take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE: single compare-and-swap-release instruction.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // Load-exclusive/store-exclusive retry loop.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    // Owner with no recursions: monitor can be released if EntryList
    // and cxq are both empty.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr);
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3629 
3630 %}
3631 
3632 //----------FRAME--------------------------------------------------------------
3633 // Definition of frame structure and management information.
3634 //
3635 //  S T A C K   L A Y O U T    Allocators stack-slot number
3636 //                             |   (to get allocators register number
3637 //  G  Owned by    |        |  v    add OptoReg::stack0())
3638 //  r   CALLER     |        |
3639 //  o     |        +--------+      pad to even-align allocators stack-slot
3640 //  w     V        |  pad0  |        numbers; owned by CALLER
3641 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3642 //  h     ^        |   in   |  5
3643 //        |        |  args  |  4   Holes in incoming args owned by SELF
3644 //  |     |        |        |  3
3645 //  |     |        +--------+
3646 //  V     |        | old out|      Empty on Intel, window on Sparc
3647 //        |    old |preserve|      Must be even aligned.
3648 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3649 //        |        |   in   |  3   area for Intel ret address
3650 //     Owned by    |preserve|      Empty on Sparc.
3651 //       SELF      +--------+
3652 //        |        |  pad2  |  2   pad to align old SP
3653 //        |        +--------+  1
3654 //        |        | locks  |  0
3655 //        |        +--------+----> OptoReg::stack0(), even aligned
3656 //        |        |  pad1  | 11   pad to align new SP
3657 //        |        +--------+
3658 //        |        |        | 10
3659 //        |        | spills |  9   spills
3660 //        V        |        |  8   (pad0 slot for callee)
3661 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3662 //        ^        |  out   |  7
3663 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3664 //     Owned by    +--------+
3665 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3666 //        |    new |preserve|      Must be even-aligned.
3667 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3668 //        |        |        |
3669 //
3670 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3671 //         known from SELF's arguments and the Java calling convention.
3672 //         Region 6-7 is determined per call site.
3673 // Note 2: If the calling convention leaves holes in the incoming argument
3674 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3676 //         incoming area, as the Java calling convention is completely under
3677 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3679 //         varargs C calling conventions.
3680 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3681 //         even aligned with pad0 as needed.
3682 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3683 //           (the latter is true on Intel but is it false on AArch64?)
3684 //         region 6-11 is even aligned; it may be padded out more so that
3685 //         the region from SP to FP meets the minimum stack alignment.
3686 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3687 //         alignment.  Region 11, pad1, may be dynamically extended so that
3688 //         SP meets the minimum alignment.
3689 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  // (a BasicObjectLock occupies two 32-bit slots).
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal reg type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half; OptoReg::Bad for 32-bit-wide values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3793 
3794 //----------ATTRIBUTES---------------------------------------------------------
3795 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute (default operand cost)

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3811 
3812 //----------OPERANDS-----------------------------------------------------------
3813 // Operand definitions must precede instruction definitions for correct parsing
3814 // in the ADLC because operands constitute user defined types which are used in
3815 // instruction definitions.
3816 
3817 //----------Simple Operands----------------------------------------------------
3818 
// Integer operands 32 bit
// 32 bit immediate -- any int constant
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3862 
// Shift values for add/sub extension shift (0..4 inclusive)
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer less than or equal to 4 (may be negative)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3883 
// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3993 
// 64 bit constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of the form 2^k - 1 with the top two bits clear
// (contiguous low-order ones)
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of the form 2^k - 1 with the top two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4045 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long constant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4099 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As above, restricted to 4-byte accesses (size shift = 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As above, restricted to 8-byte accesses (size shift = 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// As above, restricted to 16-byte accesses (size shift = 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 4-byte accesses (size shift = 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 8-byte accesses (size shift = 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 16-byte accesses (size shift = 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4180 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4202 
// Integer operands 64 bit
// 64 bit immediate -- any long constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4246 
// 32 bit offset of pc in thread anchor
// (frame_anchor_offset + last_Java_pc_offset within JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask (0xFFFFFFFF)
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4289 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate -- address of the VM's polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4371 
// Float and Double operands
// Double Immediate
// Matches any double constant.
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// Bit-pattern comparison, so -0.0d does not match.
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: any double encodable as an FMOV (packed 8-bit)
// floating-point immediate, per
// Assembler::operand_valid_for_float_immediate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4402 
// Float Immediate
// Matches any float constant.
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// Bit-pattern comparison, so -0.0f does not match.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: any float encodable as an FMOV (packed 8-bit)
// floating-point immediate (the float is widened to double before the
// check, matching the assembler's validity test).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4432 
// Narrow pointer operands
// Narrow Pointer Immediate
// Matches any compressed-oop constant.
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
// Matches any compressed-klass constant.
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4463 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
// Restricted to the no_special_reg32 class (excludes reserved registers).
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4485 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
// Also matches iRegLNoSp so rules written against the general operand
// accept values produced into the restricted class.
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4497 
// Integer 64 bit Register not Special
// Restricted to the no_special_reg class; also matches the
// fixed-register operand iRegL_R0 so R0-pinned values can feed
// generic long rules.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  op_cost(0);   // explicit zero cost, consistent with every other register operand
  format %{ %}
  interface(REG_INTER);
%}
4507 
// Pointer Register Operands
// Pointer Register
// General pointer register; also matches the restricted and
// fixed-register pointer operands so they satisfy generic rules.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4540 
// Fixed-register pointer operands: each pins a pointer value to one
// specific general-purpose register (for calling conventions and
// stub/runtime interfaces). Each also matches iRegPNoSp so the pinned
// value can feed rules written against the restricted class.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4624 
// Fixed-register long operands: each pins a long value to one specific
// general-purpose register. Each also matches iRegLNoSp.

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4668 
// Pointer 64 bit Register FP only
// Pins a pointer value to the frame pointer register.
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4679 
// Fixed-register int operands: each pins a 32-bit int value to one
// specific register. Each also matches iRegINoSp.

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4724 
4725 
// Pointer Register Operands
// Narrow Pointer Register
// Holds a compressed oop (32-bit) in a general-purpose register.
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R0
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R2
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to R3
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (32-bit narrow oop in the no_special_reg32 class.)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4774 
// heap base register -- used for encoding immN0
// Pins an int value to the register holding the compressed-oop heap base.

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4785 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4829 
// Fixed FP/SIMD register operands: pin a double value to a specific
// V register (presumably for stub/runtime call interfaces -- confirm
// against the users of these operands).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4865 
4866 // Flags register, used as output of signed compare instructions
4867 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
4870 // that ordered inequality tests use GT, GE, LT or LE none of which
4871 // pass through cases where the result is unordered i.e. one or both
4872 // inputs to the compare is a NaN. this means that the ideal code can
4873 // replace e.g. a GT with an LE and not end up capturing the NaN case
4874 // (where the comparison should always fail). EQ and NE tests are
4875 // always generated in ideal code so that unordered folds into the NE
4876 // case, matching the behaviour of AArch64 NE.
4877 //
4878 // This differs from x86 where the outputs of FP compares use a
4879 // special FP flags registers and where compares based on this
4880 // register are distinguished into ordered inequalities (cmpOpUCF) and
4881 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
4882 // to explicitly handle the unordered case in branches. x86 also has
4883 // to include extra CMoveX rules to accept a cmpOpUCF input.
4884 
// Flags register operand for signed (and FP) compares.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// Same physical flags; the distinct operand type keeps unsigned
// condition codes paired with unsigned compares.
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4905 
// Special Registers

// Method Register
// Pointer pinned to the inline-cache register (method_reg class).
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the interpreter's method-oop register
// (same method_reg class).
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
// Pointer pinned to the register holding the current JavaThread.
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer pinned to the link register (LR).
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4947 
//----------Memory Operands----------------------------------------------------

// Simple register-indirect addressing: [reg], no index, no displacement.
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}
4963 
// [base + (sign-extended int index) << scale]. The predicate only
// admits the AddP when the scaled offset suits every memory use of
// the address (see size_fits_all_mem_uses).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index) << scale], same fit-all-uses restriction.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + sign-extended int index], no scaling.
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + long index], no scaling.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5021 
// [base + immediate offset] addressing. The 4/8/16 variants constrain
// the immediate via immIOffset4/8/16 and immLoffset4/8/16 -- presumably
// to offsets valid for accesses of that byte size (confirm against the
// immediate operand definitions).

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5133 
// Narrow-oop forms of the memory operands above. All are guarded by
// Universe::narrow_oop_shift() == 0, i.e. they match a (DecodeN reg)
// base only when compressed oops are unshifted.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5238 
5239 
5240 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// Address of the anchor's PC slot: [thread_reg + immL_pc_off].
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5255 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All stack slots address [SP + slot offset]; base encoding 0x1e is the
// stack pointer (the "RSP" comments are inherited x86 terminology).
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5330 
5331 // Operands for expressing Control Flow
5332 // NOTE: Label is a predefined operand which should not be redefined in
5333 //       the AD file. It is generically handled within the ADLC.
5334 
5335 //----------Conditional Branch Operands----------------------------------------
5336 // Comparison Op  - This is the operation of the comparison, and is limited to
5337 //                  the following set of codes:
5338 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5339 //
5340 // Other attributes of the comparison, such as unsignedness, are specified
5341 // by the comparison instruction that sets a condition code flags register.
5342 // That result is represented by a flags operand whose subtype is appropriate
5343 // to the unsignedness (etc.) of the comparison.
5344 //
5345 // Later, the instruction which matches both the Comparison Op (a Bool) and
5346 // the flags (produced by the Cmp) specifies the coding of the comparison op
5347 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5348 
// used for signed integral comparisons and fp comparisons
// The hex values are AArch64 condition-code encodings
// (EQ=0, NE=1, GE=0xa, LT=0xb, GT=0xc, LE=0xd, VS=6, VC=7).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// (HS/CS=2, LO/CC=3, HI=8, LS=9 are the unsigned condition codes.)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5386 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// Restricted Bool operands: each predicate limits the test kinds so
// matching rules can safely emit compare-and-branch / test-and-branch.

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5462 
// Special operand allowing long args to int ops to be truncated for free
// Matches (ConvL2I reg) so a long register can feed a 32-bit rule
// directly: AArch64 32-bit operations read only the low word, so the
// truncation costs nothing.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER);  // semicolon added for consistency with all other operands
%}
5475 
// Vector memory opclasses: base-only, base+index, and immediate-offset
// addressing grouped by access size (4/8/16 bytes).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5479 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5507 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names onto the generic pipe_desc stages
// S0..S3 declared below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5517 
5518 // Integer ALU reg operation
5519 pipeline %{
5520 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5533 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 / ALU are "either unit" aggregates built from the two issue
// slots and the two ALUs respectively.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5548 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5554 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP binary op, single precision: sources read in S1/S2, result
// written in S5 on the NEON/FP unit.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP binary op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5578 
// FP unary (single source) operation, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary operation, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5614 
// FP <-> integer conversions. All follow the same shape: source read
// in S1, result produced in S5 on the NEON/FP unit, dual-issue in
// either slot (INS01).

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
// NOTE(review): src is declared iRegIorL2I while fp_l2f above uses
// iRegL -- looks like a copy-paste from fp_i2d; confirm intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5686 
// FP divide, single precision. Uses INS0: can only issue in slot 0
// (cf. INS01 classes which may issue in either slot).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
5706 
// FP conditional select, single precision: flags and both sources
// read in S1, result in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}
5728 
// FP move-immediate, single precision: no source operands, result in S3.
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant-table load, single precision: result in S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant-table load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
5760 
// Vector multiply, 64-bit (D register) operands.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (Q register) operands; slot 0 only.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: the destination is also read
// in S1 because it carries the accumulator input.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit; slot 0 only. As above, dst is
// both read (accumulator) and written.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
5802 
// Vector dyadic (integer) operation, 64-bit: result in S4.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector dyadic operation, 128-bit; slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical operation, 64-bit: shorter latency, result in S3.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical operation, 128-bit; slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
5842 
// Vector shift by a register shift amount, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by a register shift amount, 128-bit; slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by an immediate, 64-bit (shift amount encoded in the
// instruction, so it carries no pipeline read).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by an immediate, 128-bit; slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
5880 
// Vector FP dyadic operation, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic operation, 128-bit; slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit. NOTE: slot 0 only (INS0),
// unlike most other 64-bit vector classes.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit; slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
5920 
// Vector FP square root, 128-bit; slot 0 only.
// (No 64-bit variant is defined in this file.)
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary operation, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary operation, 128-bit; slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
5947 
// Duplicate a general register into all lanes of a 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes of a 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes of a 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes of a 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into all lanes of a 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
5992 
// Vector move-immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit; slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
6008 
// Vector load, 64-bit: address consumed at issue, result in S5.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit: address consumed at issue, data read in S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6035 
// Vector store, 128-bit: address consumed at issue, data read in S2.
// Fix: the source operand is a full 128-bit vector (vecX), matching
// vload_reg_mem128 above; the previous vecD was a copy-paste from
// vstore_reg_mem64.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6044 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is booked
// at EX1 -- confirm whether ALU : EX2 was intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6142 
//------- Compare operation -------------------------------

// Compare reg-reg: flags produced in EX2.
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate.
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6169 
//------- Conditional instructions ------------------------

// Conditional no operands: only the flags are read.
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (single source register plus flags)
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6207 
//------- Multiply pipeline operations --------------------

// 32-bit multiply reg-reg: sources consumed at issue, result from the
// MAC unit at WR.
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 32-bit multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6260 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6286 
//------- Load pipeline operations ------------------------

// Load - prefetch: address consumed at issue, no result register.
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem: result available at WR.
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-offset addressing)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6320 
//------- Store pipeline operations -----------------------

// Store - zr, mem: address only, data is the zero register.
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem: address at issue, store data read in EX2.
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg (register-offset addressing)
// NOTE(review): 'dst' here is the address register and is only read,
// never written -- the name follows the operand position, not dataflow.
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6354 
//------- Branch pipeline operations ----------------------
6356 
// Unconditional branch: uses the BRANCH unit at EX1.
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch: additionally reads the flags in EX1.
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch: reads the tested register in EX1.
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6383 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized.
// instruction_count(10) is a coarse estimate for scheduling.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6407 
// Empty pipeline class: zero latency, used for nops (see define below).
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
6442 
// Define the class for the Nop node: a nop occupies no pipeline
// resources and has zero latency (pipe_class_empty).
define %{
   MachNop = pipe_class_empty;
%}
6447 
6448 %}
6449 //----------INSTRUCTIONS-------------------------------------------------------
6450 //
6451 // match      -- States which machine-independent subtree may be replaced
6452 //               by this instruction.
6453 // ins_cost   -- The estimated cost of this instruction is used by instruction
6454 //               selection to identify a minimum cost tree of machine
6455 //               instructions that matches a tree of machine-independent
6456 //               instructions.
6457 // format     -- A string providing the disassembly for this instruction.
6458 //               The value of an instruction's operand may be inserted
6459 //               by referring to it with a '$' prefix.
6460 // opcode     -- Three instruction opcodes may be provided.  These are referred
6461 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6463 //               indicate the type of machine instruction, while secondary
6464 //               and tertiary are often used for prefix options or addressing
6465 //               modes.
6466 // ins_encode -- A list of encode classes with parameters. The encode class
6467 //               name must have been defined in an 'enc_class' specification
6468 //               in the encode section of the architecture description.
6469 
6470 // ============================================================================
6471 // Memory (Load/Store) Instructions
6472 
6473 // Load Instructions
6474 
// Byte loads. All match only plain loads: the predicate rejects nodes
// for which needs_acquiring_load() is true (presumably those are
// matched by separate acquiring-load rules -- confirm elsewhere in file).

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// The match absorbs the ConvI2L, so the predicate inspects n->in(1),
// the underlying load node.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6530 
// 16-bit loads (short/char), plain (non-acquiring) only.

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6586 
// 32-bit integer loads, plain (non-acquiring) only.

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long (sign-extending ldrsw)
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long: the AndL with the 32-bit
// mask is absorbed because ldrw already zero-extends. The predicate
// digs two levels into the tree to reach the load node.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6628 
// Load Long (64 bit signed), plain (non-acquiring) only.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fix: the disassembly annotation said "# int" for a 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6642 
// Load Range (array length; no acquire variant exists, hence no predicate)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer, plain (non-acquiring) only.
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6711 
// FP loads. Note these use pipe_class_memory rather than
// iload_reg_mem, unlike the integer loads above.

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6739 
6740 
// Load Int Constant (materialize a 32-bit immediate)
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant (materialize a 64-bit immediate)
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
6766 
// Load Pointer Constant
// Costed at 4 instructions: a general pointer constant may need a
// multi-instruction materialization sequence.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant (single instruction)

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
6796 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fix: the annotation said "# NULL ptr" (copy-paste from loadConP0);
  // this rule materializes the constant pointer value 1.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6810 
// Load Poll Page Constant (pc-relative ADR of the safepoint polling page)

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card-table base)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
6838 
// Load Narrow Pointer Constant (compressed oop immediate)

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant (compressed klass pointer immediate)

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
6880 
6881 // Load Packed Float Constant
6882 
6883 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6884   match(Set dst con);
6885   ins_cost(INSN_COST * 4);
6886   format %{ "fmovs  $dst, $con"%}
6887   ins_encode %{
6888     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6889   %}
6890 
6891   ins_pipe(fp_imm_s);
6892 %}
6893 
// Load Float Constant
// General float immediates come from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
// immDPacked constants fit the fmov modified-immediate encoding.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
6924 
6925 // Load Double Constant
6926 
// General double immediates come from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed format text: this is a double constant, not a float
  // (the "float=$con" label was copied from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6941 
// Store Instructions

// Store CMS card-mark Immediate
// Used when the preceding StoreStore barrier can be proven unnecessary.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
6988 
6989 
// Store Zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format text: aarch64_enc_strb0 stores the zero register zr
  // (same encoding as storeimmCM0); "rscractch2" was a misspelled leftover.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7002 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Char/Short (uses zr instead of tying up a register)
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Integer
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7057 
7058 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format comment: this stores a 64-bit long, not an int.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7071 
7072 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format comment: this stores a 64-bit long zero, not an int.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7085 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed NULL by storing rheapbase, which holds zero when both
// the narrow oop and narrow klass bases are NULL (see predicate).
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7142 
// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7204 
//  ---------------- volatile loads and stores ----------------
//
// These rules use acquiring loads (ldar*) / releasing stores (stlr*)
// against an indirect (register) address, so they serialize in the
// pipe_serial pipeline class.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7296 
7297 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format text: the encoding emits the sign-extending ldarsh,
  // not the zero-extending ldarh the format previously claimed.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7309 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (AndL (ConvI2L ...) 0xFFFFFFFF) idiom; ldarw zero-extends,
// so the mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7335 
7336 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format comment: this is a 64-bit long load, not an int.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7348 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7400 
// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7440 
7441 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format comment: this is a 64-bit long store, not an int.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7453 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7508 
7509 //  ---------------- end of volatile loads and stores ----------------
7510 
7511 // ============================================================================
7512 // BSWAP Instructions
7513 
7514 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7515   match(Set dst (ReverseBytesI src));
7516 
7517   ins_cost(INSN_COST);
7518   format %{ "revw  $dst, $src" %}
7519 
7520   ins_encode %{
7521     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7522   %}
7523 
7524   ins_pipe(ialu_reg);
7525 %}
7526 
7527 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7528   match(Set dst (ReverseBytesL src));
7529 
7530   ins_cost(INSN_COST);
7531   format %{ "rev  $dst, $src" %}
7532 
7533   ins_encode %{
7534     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7535   %}
7536 
7537   ins_pipe(ialu_reg);
7538 %}
7539 
7540 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7541   match(Set dst (ReverseBytesUS src));
7542 
7543   ins_cost(INSN_COST);
7544   format %{ "rev16w  $dst, $src" %}
7545 
7546   ins_encode %{
7547     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7548   %}
7549 
7550   ins_pipe(ialu_reg);
7551 %}
7552 
7553 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7554   match(Set dst (ReverseBytesS src));
7555 
7556   ins_cost(INSN_COST);
7557   format %{ "rev16w  $dst, $src\n\t"
7558             "sbfmw $dst, $dst, #0, #15" %}
7559 
7560   ins_encode %{
7561     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7562     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7563   %}
7564 
7565   ins_pipe(ialu_reg);
7566 %}
7567 
7568 // ============================================================================
7569 // Zero Count Instructions
7570 
7571 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7572   match(Set dst (CountLeadingZerosI src));
7573 
7574   ins_cost(INSN_COST);
7575   format %{ "clzw  $dst, $src" %}
7576   ins_encode %{
7577     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7578   %}
7579 
7580   ins_pipe(ialu_reg);
7581 %}
7582 
7583 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7584   match(Set dst (CountLeadingZerosL src));
7585 
7586   ins_cost(INSN_COST);
7587   format %{ "clz   $dst, $src" %}
7588   ins_encode %{
7589     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7590   %}
7591 
7592   ins_pipe(ialu_reg);
7593 %}
7594 
7595 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7596   match(Set dst (CountTrailingZerosI src));
7597 
7598   ins_cost(INSN_COST * 2);
7599   format %{ "rbitw  $dst, $src\n\t"
7600             "clzw   $dst, $dst" %}
7601   ins_encode %{
7602     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7603     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7604   %}
7605 
7606   ins_pipe(ialu_reg);
7607 %}
7608 
7609 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7610   match(Set dst (CountTrailingZerosL src));
7611 
7612   ins_cost(INSN_COST * 2);
7613   format %{ "rbit   $dst, $src\n\t"
7614             "clz    $dst, $dst" %}
7615   ins_encode %{
7616     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7617     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7618   %}
7619 
7620   ins_pipe(ialu_reg);
7621 %}
7622 
//---------- Population Count Instructions -------------------------------------
//
// Population count goes through the SIMD unit: move the value into a
// vector register, cnt per-byte, addv to sum the lanes, move back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this rewrites $src in place (zero-extends its top 32
    // bits) without declaring USE_KILL on src — confirm this is safe for
    // the register allocator.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load straight into the vector register.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form of popCountL.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7712 
7713 // ============================================================================
7714 // MemBar Instruction
7715 
7716 instruct load_fence() %{
7717   match(LoadFence);
7718   ins_cost(VOLATILE_REF_COST);
7719 
7720   format %{ "load_fence" %}
7721 
7722   ins_encode %{
7723     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7724   %}
7725   ins_pipe(pipe_serial);
7726 %}
7727 
7728 instruct unnecessary_membar_acquire() %{
7729   predicate(unnecessary_acquire(n));
7730   match(MemBarAcquire);
7731   ins_cost(0);
7732 
7733   format %{ "membar_acquire (elided)" %}
7734 
7735   ins_encode %{
7736     __ block_comment("membar_acquire (elided)");
7737   %}
7738 
7739   ins_pipe(pipe_class_empty);
7740 %}
7741 
7742 instruct membar_acquire() %{
7743   match(MemBarAcquire);
7744   ins_cost(VOLATILE_REF_COST);
7745 
7746   format %{ "membar_acquire\n\t"
7747             "dmb ish" %}
7748 
7749   ins_encode %{
7750     __ block_comment("membar_acquire");
7751     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7752   %}
7753 
7754   ins_pipe(pipe_serial);
7755 %}
7756 
7757 
7758 instruct membar_acquire_lock() %{
7759   match(MemBarAcquireLock);
7760   ins_cost(VOLATILE_REF_COST);
7761 
7762   format %{ "membar_acquire_lock (elided)" %}
7763 
7764   ins_encode %{
7765     __ block_comment("membar_acquire_lock (elided)");
7766   %}
7767 
7768   ins_pipe(pipe_serial);
7769 %}
7770 
7771 instruct store_fence() %{
7772   match(StoreFence);
7773   ins_cost(VOLATILE_REF_COST);
7774 
7775   format %{ "store_fence" %}
7776 
7777   ins_encode %{
7778     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7779   %}
7780   ins_pipe(pipe_serial);
7781 %}
7782 
7783 instruct unnecessary_membar_release() %{
7784   predicate(unnecessary_release(n));
7785   match(MemBarRelease);
7786   ins_cost(0);
7787 
7788   format %{ "membar_release (elided)" %}
7789 
7790   ins_encode %{
7791     __ block_comment("membar_release (elided)");
7792   %}
7793   ins_pipe(pipe_serial);
7794 %}
7795 
7796 instruct membar_release() %{
7797   match(MemBarRelease);
7798   ins_cost(VOLATILE_REF_COST);
7799 
7800   format %{ "membar_release\n\t"
7801             "dmb ish" %}
7802 
7803   ins_encode %{
7804     __ block_comment("membar_release");
7805     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7806   %}
7807   ins_pipe(pipe_serial);
7808 %}
7809 
7810 instruct membar_storestore() %{
7811   match(MemBarStoreStore);
7812   ins_cost(VOLATILE_REF_COST);
7813 
7814   format %{ "MEMBAR-store-store" %}
7815 
7816   ins_encode %{
7817     __ membar(Assembler::StoreStore);
7818   %}
7819   ins_pipe(pipe_serial);
7820 %}
7821 
7822 instruct membar_release_lock() %{
7823   match(MemBarReleaseLock);
7824   ins_cost(VOLATILE_REF_COST);
7825 
7826   format %{ "membar_release_lock (elided)" %}
7827 
7828   ins_encode %{
7829     __ block_comment("membar_release_lock (elided)");
7830   %}
7831 
7832   ins_pipe(pipe_serial);
7833 %}
7834 
7835 instruct unnecessary_membar_volatile() %{
7836   predicate(unnecessary_volatile(n));
7837   match(MemBarVolatile);
7838   ins_cost(0);
7839 
7840   format %{ "membar_volatile (elided)" %}
7841 
7842   ins_encode %{
7843     __ block_comment("membar_volatile (elided)");
7844   %}
7845 
7846   ins_pipe(pipe_serial);
7847 %}
7848 
7849 instruct membar_volatile() %{
7850   match(MemBarVolatile);
7851   ins_cost(VOLATILE_REF_COST*100);
7852 
7853   format %{ "membar_volatile\n\t"
7854              "dmb ish"%}
7855 
7856   ins_encode %{
7857     __ block_comment("membar_volatile");
7858     __ membar(Assembler::StoreLoad);
7859   %}
7860 
7861   ins_pipe(pipe_serial);
7862 %}
7863 
7864 // ============================================================================
7865 // Cast/Convert Instructions
7866 
7867 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7868   match(Set dst (CastX2P src));
7869 
7870   ins_cost(INSN_COST);
7871   format %{ "mov $dst, $src\t# long -> ptr" %}
7872 
7873   ins_encode %{
7874     if ($dst$$reg != $src$$reg) {
7875       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7876     }
7877   %}
7878 
7879   ins_pipe(ialu_reg);
7880 %}
7881 
7882 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7883   match(Set dst (CastP2X src));
7884 
7885   ins_cost(INSN_COST);
7886   format %{ "mov $dst, $src\t# ptr -> long" %}
7887 
7888   ins_encode %{
7889     if ($dst$$reg != $src$$reg) {
7890       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7891     }
7892   %}
7893 
7894   ins_pipe(ialu_reg);
7895 %}
7896 
7897 // Convert oop into int for vectors alignment masking
7898 instruct convP2I(iRegINoSp dst, iRegP src) %{
7899   match(Set dst (ConvL2I (CastP2X src)));
7900 
7901   ins_cost(INSN_COST);
7902   format %{ "movw $dst, $src\t# ptr -> int" %}
7903   ins_encode %{
7904     __ movw($dst$$Register, $src$$Register);
7905   %}
7906 
7907   ins_pipe(ialu_reg);
7908 %}
7909 
7910 // Convert compressed oop into int for vectors alignment masking
7911 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format text: use the $dst operand (the "$" was missing) and show
  // the movw that the encoding actually emits.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7925 

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null variant: no null check needed, so the flags are not killed.
// NOTE(review): cr is declared but has no effect() clause — confirm it
// is intentionally unused here.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7980 
7981 // n.b. AArch64 implementations of encode_klass_not_null and
7982 // decode_klass_not_null do not modify the flags register so, unlike
7983 // Intel, we don't kill CR as a side effect here
7984 
// Compress a klass pointer; per the note above, the AArch64
// implementation does not touch the flags, so no KILL cr effect.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7999 
// Expand a narrow klass back to a full pointer; flags are untouched
// (see note above), so no KILL cr effect.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src_reg = $src$$Register;
    if (dst_reg == src_reg) {
      // In-place decode uses the single-register overload.
      __ decode_klass_not_null(dst_reg);
    } else {
      __ decode_klass_not_null(dst_reg, src_reg);
    }
  %}

  ins_pipe(ialu_reg);
%}
8018 
// Type-system bookkeeping node: emits no code (size 0, empty
// encoding); dst is both input and output, so no register moves.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8028 
// Pointer cast node: emits no code (size 0, empty encoding).
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8038 
// Integer cast node: emits no code (size 0, empty encoding).
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  // NOTE(review): castII declares an explicit zero cost while castPP
  // and checkCastPP rely on the default -- confirm this is intended.
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8049 
8050 // ============================================================================
8051 // Atomic operation instructions
8052 //
8053 // Intel and SPARC both implement Ideal Node LoadPLocked and
8054 // Store{PIL}Conditional instructions using a normal load for the
8055 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8056 //
8057 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8058 // pair to lock object allocations from Eden space when not using
8059 // TLABs.
8060 //
8061 // There does not appear to be a Load{IL}Locked Ideal Node and the
8062 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8063 // and to use StoreIConditional only for 32-bit and StoreLConditional
8064 // only for 64-bit.
8065 //
// We implement LoadPLocked and StorePLocked using, respectively, the
// AArch64 hw load-exclusive and store-conditional instructions. In
// contrast, we must implement each of Store{IL}Conditional using a
// CAS, which employs a pair of instructions comprising a
// load-exclusive followed by a store-conditional.
8072 
8073 
8074 // Locked-load (linked load) of the current heap-top
8075 // used when updating the eden heap top
8076 // implemented using ldaxr on AArch64
8077 
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  // ldaxr: acquiring load-exclusive; arms the exclusive monitor that
  // the matching stlxr in storePConditional tests.
  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8090 
8091 // Conditional-store of the updated heap-top.
8092 // Used during allocation of the shared heap.
8093 // Sets flag (EQ) on success.
8094 // implemented using stlxr on AArch64.
8095 
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // NOTE(review): $oldval is not referenced by the encoding; the
  // exclusive monitor armed by the paired loadPLocked stands in for
  // the compare -- confirm.
  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8115 
8116 
8117 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8118 // when attempting to rebias a lock towards the current thread.  We
8119 // must use the acquire form of cmpxchg in order to guarantee acquire
8120 // semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // Acquire form of the CAS (see comment above re: lock rebiasing);
  // success is reported to the matcher through the flags (EQ).
  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8136 
8137 // storeIConditional also has acquire semantics, for no better reason
8138 // than matching storeLConditional.  At the time of writing this
8139 // comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // 32-bit acquire CAS, mirroring storeLConditional (see comment above).
  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8155 
8156 // standard CompareAndSwapX when we are using barriers
8157 // these have higher priority than the rules selected by a predicate
8158 
8159 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8160 // can't match them
8161 
// byte (8-bit) CAS; $res <- 1 on success, 0 on failure (via cset).
// NOTE(review): the "(int)" tag in the byte and short format strings
// looks copy-pasted from the int rule; format text only, no
// behavioral effect.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// short (16-bit) CAS; $res <- 1 on success, 0 on failure.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// int (32-bit) CAS.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// long (64-bit) CAS.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// full-width pointer CAS.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// narrow (compressed) oop CAS, using the 32-bit form.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8269 
8270 // alternative CompareAndSwapX when we are eliding barriers
8271 
// Acquiring CAS variants, selected when needs_acquiring_load_exclusive
// holds for the node; the lower ins_cost (VOLATILE_REF_COST versus
// 2 * VOLATILE_REF_COST above) lets these win when the predicate passes.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// short (16-bit) acquiring CAS.
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// int (32-bit) acquiring CAS.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// long (64-bit) acquiring CAS.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// pointer acquiring CAS.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// narrow (compressed) oop acquiring CAS.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8385 
8386 
8387 // ---------------------------------------------------------------------
8388 
8389 
8390 // BEGIN This section of the file is automatically generated. Do not edit --------------
8391 
8392 // Sundry CAS operations.  Note that release is always true,
8393 // regardless of the memory ordering of the CAS.  This is because we
8394 // need the volatile case to be sequentially consistent but there is
8395 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8396 // can't check the type of memory ordering here, so we always emit a
8397 // STLXR.
8398 
8399 // This section is generated from aarch64_ad_cas.m4
8400 
8401 
8402 
// Strong compareAndExchange: returns the value found in memory rather
// than a success flag; TEMP_DEF res keeps the result register disjoint
// from the inputs.
// NOTE(review): every format string below says ", weak" although the
// encodings pass /*weak*/ false (these are the strong forms).  Since
// this section is generated, the fix belongs in aarch64_ad_cas.m4.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the sub-word result to a full int.
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the sub-word result to a full int.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8494 
// Acquiring forms of the strong compareAndExchange rules above,
// selected by needs_acquiring_load_exclusive.
// NOTE(review): the ", weak" tags in these format strings are wrong
// here as well (/*weak*/ false); fix in aarch64_ad_cas.m4.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the sub-word result to a full int.
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the sub-word result to a full int.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8594 
// Weak CAS: may fail spuriously, so only the success flag is produced
// ($res <- 1/0 via csetw); the loaded value is discarded (result
// register is noreg in the cmpxchg call).
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8696 
// Acquiring forms of the weak CAS rules above, selected by
// needs_acquiring_load_exclusive.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8804 
8805 // END This section of the file is automatically generated. Do not edit --------------
8806 // ---------------------------------------------------------------------
8807 
// Atomic exchange rules (GetAndSet*): store $newv to [$mem] and return the
// previous contents in $prev.  The "w" variants operate on 32 bits
// (int / narrow oop), the plain variants on 64 bits (long / ptr).

// Atomic exchange of an int.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a long.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow (compressed) oop; 32-bit exchange.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a full-width pointer; 64-bit exchange.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8847 
// Acquiring variants of the GetAndSet* rules.  Selected (via the predicate
// and the lower cost) when the load side must have acquire semantics; the
// encodings use the MacroAssembler's "al"-suffixed exchanges (presumably
// acquire+release ordering, matching the needs_acquiring_load_exclusive
// predicate -- confirm against macroAssembler_aarch64).

// Acquiring atomic exchange of an int.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a long.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a narrow (compressed) oop.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange of a full-width pointer.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8891 
8892 
// Atomic fetch-and-add rules (GetAndAdd{L,I}), relaxed ordering.  For each
// width there are four flavours: register vs immediate increment, and with
// vs without a consumed result.  The "_no_res" forms pass noreg as the
// destination (the old value is discarded) and are selected by the
// result_not_used() predicate; they are costed slightly cheaper (no "+ 1")
// so they win when applicable.

// Fetch-and-add long, register increment, result used.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add long, register increment, result discarded.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add long, immediate increment, result used.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add long, immediate increment, result discarded.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, register increment, result used.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, register increment, result discarded.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, immediate increment, result used.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, immediate increment, result discarded.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8976 
// Acquiring variants of the fetch-and-add rules.  Same eight-way structure
// as above, but predicated on needs_acquiring_load_exclusive(n) (ANDed with
// result_not_used() for the "_no_res" forms), costed cheaper so they win,
// and encoded with the "al"-suffixed MacroAssembler adds (presumably
// acquire+release ordering -- confirm against macroAssembler_aarch64).

// Acquiring fetch-and-add long, register increment, result used.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add long, register increment, result discarded.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add long, immediate increment, result used.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add long, immediate increment, result discarded.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, register increment, result used.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, register increment, result discarded.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, immediate increment, result used.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, immediate increment, result discarded.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9064 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // Branch-free three-way compare: cmp sets the flags, csetw makes
    // $dst = (NE ? 1 : 0), then cnegw negates $dst when LT, yielding
    // -1 / 0 / +1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9087 
// Manifest a CmpL-against-immediate result in an integer register:
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0), with $src2 a constant that
// fits an add/sub immediate (immLAddSub).
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // Compare against the constant without materializing it in a register:
    // subs/adds with zr as destination only updates the flags.  A negative
    // constant is handled by adding its negation (the immLAddSub operand
    // keeps the magnitude within the add/sub immediate range, so -con does
    // not overflow).
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // Manifest -1 / 0 / +1 from the flags, as in cmpL3_reg_reg.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9112 
9113 // ============================================================================
9114 // Conditional Move Instructions
9115 
9116 // n.b. we have identical rules for both a signed compare op (cmpOp)
9117 // and an unsigned compare op (cmpOpU). it would be nice if we could
9118 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9123 // which throws a ShouldNotHappen. So, we have to provide two flavours
9124 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9125 
// Conditional move of an int, register/register.  Note the operand order in
// the encoding: csel Rd, Rn, Rm, cond gives Rd = cond ? Rn : Rm, so $src2 is
// the value selected when $cmp holds.

// Signed-compare flavour (cmpOp / rFlagsReg).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour (cmpOpU / rFlagsRegU); same encoding, different
// condition-code operand (see the n.b. comment above on why both exist).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9157 
9158 // special cases where one arg is zero
9159 
9160 // n.b. this is selected in preference to the rule above because it
9161 // avoids loading constant 0 into a source register
9162 
9163 // TODO
9164 // we ought only to be able to cull one of these variants as the ideal
9165 // transforms ought always to order the zero consistently (to left/right?)
9166 
// Int cmove where one arm is the constant zero: use zr as a csel source
// instead of loading 0 into a register.

// zero on the left (selected when $cmp does NOT hold), signed compare.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right (selected when $cmp holds), signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9230 
9231 // special case for creating a boolean 0 or 1
9232 
9233 // n.b. this is selected in preference to the rule above because it
9234 // avoids loading constants 0 and 1 into a source register
9235 
// Boolean materialization: CMove between constants 1 and 0 becomes a single
// csincw $dst, zr, zr, cond (i.e. $dst = cond ? 0 : 0 + 1), avoiding any
// constant loads.

// Signed-compare flavour.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9273 
// Conditional move of a long, register/register (64-bit csel; $src2 is the
// value selected when $cmp holds).

// Signed-compare flavour.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9305 
9306 // special cases where one arg is zero
9307 
// Long cmove where one arm is constant zero: use zr as a csel source.

// zero on the right (selected when $cmp holds), signed compare.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right, unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left (selected when $cmp does NOT hold), signed compare.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left, unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9371 
// Conditional move of a pointer, register/register (64-bit csel).

// Signed-compare flavour.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9403 
9404 // special cases where one arg is zero
9405 
// Pointer cmove where one arm is the null constant: use zr as a csel source.

// null on the right (selected when $cmp holds), signed compare.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the right, unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left (selected when $cmp does NOT hold), signed compare.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left, unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9469 
// Conditional move of a narrow (compressed) oop, register/register
// (32-bit cselw), signed-compare flavour.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9485 
// Conditional move of a narrow (compressed) oop, register/register
// (32-bit cselw), unsigned-compare flavour.  The format string previously
// said "signed" -- fixed to match the cmpOpU operand and the sibling
// cmovUI/cmovUL/cmovUP rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9501 
9502 // special cases where one arg is zero
9503 
// Narrow-oop cmove where one arm is the null constant: use zr as a cselw
// source.

// null on the right (selected when $cmp holds), signed compare.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the right, unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left (selected when $cmp does NOT hold), signed compare.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// null on the left, unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9567 
// Conditional move of a float via fcsels; $src2 is selected when $cmp holds.
// Signed-compare flavour.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9585 
// Conditional move of a float via fcsels, unsigned-compare flavour.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9603 
// Conditional move of a double via fcseld; $src2 is selected when $cmp
// holds.  Signed-compare flavour.  The format comment previously said
// "cmove float" for this double rule -- fixed.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9621 
// Conditional move of a double via fcseld, unsigned-compare flavour.  The
// format comment previously said "cmove float" for this double rule --
// fixed.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9639 
9640 // ============================================================================
9641 // Arithmetic Instructions
9642 //
9643 
9644 // Integer Addition
9645 
9646 // TODO
9647 // these currently employ operations which do not set CR and hence are
9648 // not flagged as killing CR but we would like to isolate the cases
9649 // where we want to set flags from those where we don't. need to work
9650 // out how to do that.
9651 
// Integer addition, register + register (32-bit addw; flags untouched).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9666 
// Integer addition, register + add/sub-encodable immediate.  Both rules
// share the aarch64_enc_addsubw_imm encoding; opcode 0x0 selects "add"
// (vs sub) inside that shared encoder.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Same as addI_reg_imm but the register input is a long being narrowed
// (ConvL2I); the 32-bit addw ignores the upper bits, so no explicit
// truncation is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9694 
// Pointer Addition
// Plain 64-bit pointer + long offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset: folds the ConvI2L into the add's
// sxtw extend operand instead of emitting a separate extension.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long offset << scale): folds the shift into the address mode
// via lea with an lsl-scaled index.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + (sign-extended int offset << scale): folds both the ConvI2L
// and the shift into an sxtw-scaled address mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9755 
// (long)(int value) << scale, folded into a single sbfiz
// (sign-extend-then-shift bitfield insert). The field width is clamped
// with MIN to 32 because the source has only 32 significant bits.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9770 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9787 
// Long Addition

// Add two 64-bit long registers.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// No constant pool entries required. Long Immediate Addition.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9819 
// Integer Subtraction

// Subtract two 32-bit int registers: subw dst, src1, src2.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9850 
// Long Subtraction

// Subtract two 64-bit long registers.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9867 
// No constant pool entries required. Long Immediate Subtraction.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: was "sub$dst" — the mnemonic and first operand
  // ran together in the printed disassembly. Spacing now matches
  // subL_reg_reg ("sub  $dst, ...").
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9882 
// Integer Negation (special case for sub)

// 0 - src matched as negw (alias of subw dst, wzr, src).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 0L - src matched as neg (alias of sub dst, xzr, src).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9914 
// Integer Multiply

// 32-bit multiply: mulw dst, src1, src2.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// (long)a * (long)b where both operands are sign-extended ints:
// matched as a single smull (32x32 -> 64 signed multiply).
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9963 
// High 64 bits of the 64x64 -> 128-bit signed product (MulHiL),
// emitted as a single smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed format string: removed the stray trailing comma that was
  // printed between the last operand and the "# mulhi" annotation.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9979 
// Combined Integer Multiply & Add/Sub

// src3 + src1*src2 fused into a single multiply-add.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: the encoding emits the 32-bit form maddw,
  // not madd, so print the actual mnemonic.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9997 
// src3 - src1*src2 fused into a single multiply-subtract.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: the encoding emits the 32-bit form msubw,
  // not msub, so print the actual mnemonic.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10013 
// Combined Integer Multiply & Neg

// (-src1)*src2 or src1*(-src2) fused into a single multiply-negate.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: the encoding emits the 32-bit form mnegw,
  // not mneg, so print the actual mnemonic.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10031 
// Combined Long Multiply & Add/Sub

// src3 + src1*src2 fused into a single 64-bit multiply-add.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// src3 - src1*src2 fused into a single 64-bit multiply-subtract.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// (-src1)*src2 or src1*(-src2) fused into a single 64-bit multiply-negate.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10083 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src1 >> 31) >>> 31 extracts the sign bit as 0/1; matched as a
// single logical shift right by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Rounding add used before an arithmetic shift when dividing by a
// power of two: dst = src + (src >>> 31), i.e. add 1 only when src
// is negative, folded into addw with an LSR-shifted second operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10119 
// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// (src1 >> 63) >>> 63 extracts the sign bit as 0/1; matched as a
// single logical shift right by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10141 
// Rounding add used before an arithmetic shift when dividing by a
// power of two: dst = src + (src >>> 63), i.e. add 1 only when src
// is negative, folded into add with an LSR-shifted second operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format string: show the shifted operand as "LSR $div1" to
  // match the emitted instruction and the parallel int rule div2Round.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10155 
// Integer Remainder

// Lowered as divide then multiply-subtract: rem = src1 - (src1/src2)*src2.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format string: had a stray, unmatched "(" after msubw.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10168 
// Long Remainder

// Lowered as divide then multiply-subtract: rem = src1 - (src1/src2)*src2.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format string: had a stray, unmatched "(" after msub, and used
  // "\n" where the parallel rule modI uses "\n\t" for the second line.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10181 
// Integer Shifts
//
// Variable shifts use the lslv/lsrv/asrv family, which (like Java int
// shifts) use only the low 5 bits of the shift count. Immediate shifts
// mask the constant with 0x1f explicitly for the same reason.

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10279 
10280 // Combined Int Mask and Right Shift (using UBFM)
10281 // TODO
10282 
// Long Shifts
//
// As with the int shifts above, but 64-bit: shift counts use only the
// low 6 bits (masked with 0x3f for immediates), matching Java long
// shift semantics.

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Same as urShiftL_reg_imm but the shifted value is a pointer reinterpreted
// as an integer (CastP2X).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10396 
10397 // BEGIN This section of the file is automatically generated. Do not edit --------------
10398 
10399 instruct regL_not_reg(iRegLNoSp dst,
10400                          iRegL src1, immL_M1 m1,
10401                          rFlagsReg cr) %{
10402   match(Set dst (XorL src1 m1));
10403   ins_cost(INSN_COST);
10404   format %{ "eon  $dst, $src1, zr" %}
10405 
10406   ins_encode %{
10407     __ eon(as_Register($dst$$reg),
10408               as_Register($src1$$reg),
10409               zr,
10410               Assembler::LSL, 0);
10411   %}
10412 
10413   ins_pipe(ialu_reg);
10414 %}
10415 instruct regI_not_reg(iRegINoSp dst,
10416                          iRegIorL2I src1, immI_M1 m1,
10417                          rFlagsReg cr) %{
10418   match(Set dst (XorI src1 m1));
10419   ins_cost(INSN_COST);
10420   format %{ "eonw  $dst, $src1, zr" %}
10421 
10422   ins_encode %{
10423     __ eonw(as_Register($dst$$reg),
10424               as_Register($src1$$reg),
10425               zr,
10426               Assembler::LSL, 0);
10427   %}
10428 
10429   ins_pipe(ialu_reg);
10430 %}
10431 
10432 instruct AndI_reg_not_reg(iRegINoSp dst,
10433                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10434                          rFlagsReg cr) %{
10435   match(Set dst (AndI src1 (XorI src2 m1)));
10436   ins_cost(INSN_COST);
10437   format %{ "bicw  $dst, $src1, $src2" %}
10438 
10439   ins_encode %{
10440     __ bicw(as_Register($dst$$reg),
10441               as_Register($src1$$reg),
10442               as_Register($src2$$reg),
10443               Assembler::LSL, 0);
10444   %}
10445 
10446   ins_pipe(ialu_reg_reg);
10447 %}
10448 
10449 instruct AndL_reg_not_reg(iRegLNoSp dst,
10450                          iRegL src1, iRegL src2, immL_M1 m1,
10451                          rFlagsReg cr) %{
10452   match(Set dst (AndL src1 (XorL src2 m1)));
10453   ins_cost(INSN_COST);
10454   format %{ "bic  $dst, $src1, $src2" %}
10455 
10456   ins_encode %{
10457     __ bic(as_Register($dst$$reg),
10458               as_Register($src1$$reg),
10459               as_Register($src2$$reg),
10460               Assembler::LSL, 0);
10461   %}
10462 
10463   ins_pipe(ialu_reg_reg);
10464 %}
10465 
10466 instruct OrI_reg_not_reg(iRegINoSp dst,
10467                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10468                          rFlagsReg cr) %{
10469   match(Set dst (OrI src1 (XorI src2 m1)));
10470   ins_cost(INSN_COST);
10471   format %{ "ornw  $dst, $src1, $src2" %}
10472 
10473   ins_encode %{
10474     __ ornw(as_Register($dst$$reg),
10475               as_Register($src1$$reg),
10476               as_Register($src2$$reg),
10477               Assembler::LSL, 0);
10478   %}
10479 
10480   ins_pipe(ialu_reg_reg);
10481 %}
10482 
10483 instruct OrL_reg_not_reg(iRegLNoSp dst,
10484                          iRegL src1, iRegL src2, immL_M1 m1,
10485                          rFlagsReg cr) %{
10486   match(Set dst (OrL src1 (XorL src2 m1)));
10487   ins_cost(INSN_COST);
10488   format %{ "orn  $dst, $src1, $src2" %}
10489 
10490   ins_encode %{
10491     __ orn(as_Register($dst$$reg),
10492               as_Register($src1$$reg),
10493               as_Register($src2$$reg),
10494               Assembler::LSL, 0);
10495   %}
10496 
10497   ins_pipe(ialu_reg_reg);
10498 %}
10499 
10500 instruct XorI_reg_not_reg(iRegINoSp dst,
10501                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10502                          rFlagsReg cr) %{
10503   match(Set dst (XorI m1 (XorI src2 src1)));
10504   ins_cost(INSN_COST);
10505   format %{ "eonw  $dst, $src1, $src2" %}
10506 
10507   ins_encode %{
10508     __ eonw(as_Register($dst$$reg),
10509               as_Register($src1$$reg),
10510               as_Register($src2$$reg),
10511               Assembler::LSL, 0);
10512   %}
10513 
10514   ins_pipe(ialu_reg_reg);
10515 %}
10516 
10517 instruct XorL_reg_not_reg(iRegLNoSp dst,
10518                          iRegL src1, iRegL src2, immL_M1 m1,
10519                          rFlagsReg cr) %{
10520   match(Set dst (XorL m1 (XorL src2 src1)));
10521   ins_cost(INSN_COST);
10522   format %{ "eon  $dst, $src1, $src2" %}
10523 
10524   ins_encode %{
10525     __ eon(as_Register($dst$$reg),
10526               as_Register($src1$$reg),
10527               as_Register($src2$$reg),
10528               Assembler::LSL, 0);
10529   %}
10530 
10531   ins_pipe(ialu_reg_reg);
10532 %}
10533 
10534 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10535                          iRegIorL2I src1, iRegIorL2I src2,
10536                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10537   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10538   ins_cost(1.9 * INSN_COST);
10539   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10540 
10541   ins_encode %{
10542     __ bicw(as_Register($dst$$reg),
10543               as_Register($src1$$reg),
10544               as_Register($src2$$reg),
10545               Assembler::LSR,
10546               $src3$$constant & 0x1f);
10547   %}
10548 
10549   ins_pipe(ialu_reg_reg_shift);
10550 %}
10551 
10552 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10553                          iRegL src1, iRegL src2,
10554                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10555   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10556   ins_cost(1.9 * INSN_COST);
10557   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10558 
10559   ins_encode %{
10560     __ bic(as_Register($dst$$reg),
10561               as_Register($src1$$reg),
10562               as_Register($src2$$reg),
10563               Assembler::LSR,
10564               $src3$$constant & 0x3f);
10565   %}
10566 
10567   ins_pipe(ialu_reg_reg_shift);
10568 %}
10569 
10570 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10571                          iRegIorL2I src1, iRegIorL2I src2,
10572                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10573   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10574   ins_cost(1.9 * INSN_COST);
10575   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10576 
10577   ins_encode %{
10578     __ bicw(as_Register($dst$$reg),
10579               as_Register($src1$$reg),
10580               as_Register($src2$$reg),
10581               Assembler::ASR,
10582               $src3$$constant & 0x1f);
10583   %}
10584 
10585   ins_pipe(ialu_reg_reg_shift);
10586 %}
10587 
10588 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10589                          iRegL src1, iRegL src2,
10590                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10591   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10592   ins_cost(1.9 * INSN_COST);
10593   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10594 
10595   ins_encode %{
10596     __ bic(as_Register($dst$$reg),
10597               as_Register($src1$$reg),
10598               as_Register($src2$$reg),
10599               Assembler::ASR,
10600               $src3$$constant & 0x3f);
10601   %}
10602 
10603   ins_pipe(ialu_reg_reg_shift);
10604 %}
10605 
10606 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10607                          iRegIorL2I src1, iRegIorL2I src2,
10608                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10609   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10610   ins_cost(1.9 * INSN_COST);
10611   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10612 
10613   ins_encode %{
10614     __ bicw(as_Register($dst$$reg),
10615               as_Register($src1$$reg),
10616               as_Register($src2$$reg),
10617               Assembler::LSL,
10618               $src3$$constant & 0x1f);
10619   %}
10620 
10621   ins_pipe(ialu_reg_reg_shift);
10622 %}
10623 
10624 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10625                          iRegL src1, iRegL src2,
10626                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10627   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10628   ins_cost(1.9 * INSN_COST);
10629   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10630 
10631   ins_encode %{
10632     __ bic(as_Register($dst$$reg),
10633               as_Register($src1$$reg),
10634               as_Register($src2$$reg),
10635               Assembler::LSL,
10636               $src3$$constant & 0x3f);
10637   %}
10638 
10639   ins_pipe(ialu_reg_reg_shift);
10640 %}
10641 
10642 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10643                          iRegIorL2I src1, iRegIorL2I src2,
10644                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10645   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10646   ins_cost(1.9 * INSN_COST);
10647   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10648 
10649   ins_encode %{
10650     __ eonw(as_Register($dst$$reg),
10651               as_Register($src1$$reg),
10652               as_Register($src2$$reg),
10653               Assembler::LSR,
10654               $src3$$constant & 0x1f);
10655   %}
10656 
10657   ins_pipe(ialu_reg_reg_shift);
10658 %}
10659 
10660 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10661                          iRegL src1, iRegL src2,
10662                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10663   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10664   ins_cost(1.9 * INSN_COST);
10665   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10666 
10667   ins_encode %{
10668     __ eon(as_Register($dst$$reg),
10669               as_Register($src1$$reg),
10670               as_Register($src2$$reg),
10671               Assembler::LSR,
10672               $src3$$constant & 0x3f);
10673   %}
10674 
10675   ins_pipe(ialu_reg_reg_shift);
10676 %}
10677 
// Exclusive-OR with -1 folded into EON (exclusive-OR NOT) with a
// shifted second register operand.  src4 is the -1 constant consumed
// by the match rule: -1 ^ x == ~x, so
//   dst = ~((src2 shift src3) ^ src1) == src1 EON (src2, shift src3).
// The shift amount is masked to the register width (0x1f for the
// 32-bit "w" forms, 0x3f for the 64-bit forms).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, arithmetic shift right (ASR) variant.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10749 
// Or with a complemented, shifted operand folded into ORN (OR NOT).
// src4 is the -1 constant consumed by the match rule:
//   src1 | ((src2 shift src3) ^ -1) == src1 | ~(src2 shift src3)
//   == src1 ORN (src2, shift src3).
// Shift amount masked to register width (0x1f / 0x3f).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, logical shift right (LSR) variant.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right (LSR) variant.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right (ASR) variant.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10857 
// And with a shifted second register operand, folded into a single
// AND instruction with shifted-register addressing:
//   dst = src1 & (src2 shift src3).
// Shift amount masked to register width (0x1f / 0x3f).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, logical shift right (LSR) variant.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right (LSR) variant.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right (ASR) variant.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10971 
// Xor with a shifted second register operand, folded into a single
// EOR instruction with shifted-register addressing:
//   dst = src1 ^ (src2 shift src3).
// Shift amount masked to register width (0x1f / 0x3f).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, logical shift right (LSR) variant.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right (LSR) variant.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right (ASR) variant.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11085 
// Or with a shifted second register operand, folded into a single
// ORR instruction with shifted-register addressing:
//   dst = src1 | (src2 shift src3).
// Shift amount masked to register width (0x1f / 0x3f).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, logical shift right (LSR) variant.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right (LSR) variant.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right (ASR) variant.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11199 
// Add with a shifted second register operand, folded into a single
// ADD instruction with shifted-register addressing:
//   dst = src1 + (src2 shift src3).
// Shift amount masked to register width (0x1f / 0x3f).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, logical shift right (LSR) variant.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right (LSR) variant.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right (ASR) variant.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11313 
// Subtract of a shifted second register operand, folded into a single
// SUB instruction with shifted-register addressing:
//   dst = src1 - (src2 shift src3).
// Shift amount masked to register width (0x1f / 0x3f).
// NOTE(review): rFlagsReg cr appears in the operand list but is not
// referenced by the encoding.

// 32-bit, logical shift right (LSR) variant.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift right (LSR) variant.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic shift right (ASR) variant.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic shift right (ASR) variant.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, logical shift left (LSL) variant.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical shift left (LSL) variant.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11427 
11428 
11429 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapsed into one SBFM.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // SBFM immediates: imms = 63 - lshift is the most-significant
    // source bit kept; immr = (rshift - lshift) mod 64 rotates it into
    // place, giving (src << lshift) >> rshift with sign extension
    // (see the SBFM description in the Arm ARM).
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11452 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit form of sbfmL: (src << lshift) >> rshift via one SBFMW.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms = 31 - lshift, immr = (rshift - lshift) mod 32: the 32-bit
    // SBFM encoding of the shift pair, with sign extension.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11475 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned variant: (src << lshift) >>> rshift via one UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms = 63 - lshift, immr = (rshift - lshift) mod 64: the UBFM
    // encoding of the shift pair, zero-extending the result.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11498 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (src << lshift) >>> rshift via one UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms = 31 - lshift, immr = (rshift - lshift) mod 32: the 32-bit
    // UBFM encoding of the shift pair, zero-extending the result.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask collapses to a single UBFXW when mask is a
// contiguous low-bit mask (immI_bitmask), so mask+1 is a power of two.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width in bits: mask == 2^width - 1.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: (src >>> rshift) & mask collapses to a
// single UBFX when mask is a contiguous low-bit mask (immL_bitmask),
// so mask+1 is a power of two.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // The mask is a 64-bit constant: use exact_log2_long so field
    // widths above 31 bits are computed correctly (consistent with the
    // exact_log2_long used in the ubfizL predicate).
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11553 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The ConvI2L is free here: ubfx zero-extends into the 64-bit dst.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask is a 32-bit immI_bitmask, so exact_log2 on mask+1 is safe.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11571 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift collapses to a single UBFIZW; the predicate
// ensures the shifted field still fits in 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // Field width in bits: mask == 2^width - 1.
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// (src & mask) << lshift collapses to a single UBFIZ; the predicate
// ensures the shifted field still fits in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // Use exact_log2_long for the 64-bit mask, consistent with the
    // predicate above; plain exact_log2 would compute the wrong width
    // for masks wider than 31 bits.
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11610 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// The ConvI2L is free: ubfiz zero-extends the field into the 64-bit dst.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // mask is a 32-bit immI_bitmask, so exact_log2 on mask+1 is safe.
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11629 
// Rotations
//
// (src1 << lshift) | (src2 >>> rshift) is a single EXTR (extract) when the
// two shift amounts sum to the register width: EXTR takes the low rshift
// bits of src1 concatenated above src2's high bits.  Each predicate checks
// exactly that: (lshift + rshift) & 63 == 0 for long, & 31 == 0 for int.
// The Add forms are equivalent because the two shifted values have no
// overlapping bits under that condition.
// NOTE(review): the rFlagsReg cr operand is not referenced by any effect or
// encoding in these four rules -- presumably vestigial; confirm before
// depending on it.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11691 
11692 
// rol expander
//
// AArch64 has no rotate-left-by-register instruction; rotate left by N is
// implemented as rotate right by (width - N).  The subw below computes
// (0 - shift) into rscratch1; RORV only consumes the count modulo the
// register width (per the AArch64 RORV definition), so -shift acts as
// width - shift.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // Negate the shift count, then rotate right by it (see note above).
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of rolL_rReg; same negate-then-rorv technique.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Match rules for variable rotate-left as the ideal graph expresses it:
// (x << s) | (x >>> (C - s)) with C == width, or C == 0 (equivalent because
// shift counts are taken modulo the width).  Each expands to the rol
// expander above.

instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// ror expander
//
// Rotate right maps directly onto RORV, hence the lower cost than rol.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant of rorL_rReg.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Match rules for variable rotate-right: (x >>> s) | (x << (C - s)) with
// C == width or C == 0, expanding to the ror expander above.

instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11826 
// Add/subtract (extended)
//
// These rules fold a sign/zero extension of the second operand into the
// add/sub using the AArch64 extended-register forms (add/sub Rd, Rn, Rm,
// {s,u}xt{b,h,w}).  Sign extension appears in the ideal graph as a
// left-shift/arithmetic-right-shift pair; zero extension appears either as
// a left/logical-right shift pair or as an AND with 0xff/0xffff/0xffffffff.
// NOTE(review): per the END marker below (L"END This section ... automatically
// generated"), these rules come from a generator -- changes belong in the
// generating source, not here.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
// NOTE(review): stray ';' after '%}' above -- appears redundant (sibling
// rules end with a bare '%}'); confirm against the adlc grammar and fix in
// the generator.

instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
// NOTE(review): same stray ';' as AddExtI above.

// Sign extension written as (x << K) >> K (arithmetic), K = 32 - 16/8 for
// int, 64 - 16/32/8 for long.

instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Zero extension of a byte written as (x << 24) >>> 24.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Zero extension written as an AND with 0xff / 0xffff / 0xffffffff.

instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12077 
12078 
// Add/subtract (extended register, shifted)
//
// Same extension-folding as the group above, but with an additional left
// shift of the extended value folded into the extended-register form:
// add/sub Rd, Rn, Rm, {s,u}xt{b,h,w} #lshift.  immIExt bounds the shift
// amount -- presumably to the 0..4 range the extended-register encoding
// allows; confirm against the immIExt operand definition.
// NOTE(review): the format strings below print the literal text "#lshift2"
// / "#lshift" rather than substituting the constant (no '$'); cosmetic
// only, affects disassembly output.

instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// ConvI2L + shift folded into sxtw #lshift.

instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
// NOTE(review): stray ';' after '%}' above -- appears redundant; confirm.

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
// NOTE(review): same stray ';' as AddExtI_shift above.

// AND-mask zero extension + shift folded into uxt{b,h,w} #lshift.

instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12366 // END This section of the file is automatically generated. Do not edit --------------
12367 
12368 // ============================================================================
12369 // Floating Point Arithmetic Instructions
12370 
12371 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12372   match(Set dst (AddF src1 src2));
12373 
12374   ins_cost(INSN_COST * 5);
12375   format %{ "fadds   $dst, $src1, $src2" %}
12376 
12377   ins_encode %{
12378     __ fadds(as_FloatRegister($dst$$reg),
12379              as_FloatRegister($src1$$reg),
12380              as_FloatRegister($src2$$reg));
12381   %}
12382 
12383   ins_pipe(fp_dop_reg_reg_s);
12384 %}
12385 
12386 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12387   match(Set dst (AddD src1 src2));
12388 
12389   ins_cost(INSN_COST * 5);
12390   format %{ "faddd   $dst, $src1, $src2" %}
12391 
12392   ins_encode %{
12393     __ faddd(as_FloatRegister($dst$$reg),
12394              as_FloatRegister($src1$$reg),
12395              as_FloatRegister($src2$$reg));
12396   %}
12397 
12398   ins_pipe(fp_dop_reg_reg_d);
12399 %}
12400 
12401 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12402   match(Set dst (SubF src1 src2));
12403 
12404   ins_cost(INSN_COST * 5);
12405   format %{ "fsubs   $dst, $src1, $src2" %}
12406 
12407   ins_encode %{
12408     __ fsubs(as_FloatRegister($dst$$reg),
12409              as_FloatRegister($src1$$reg),
12410              as_FloatRegister($src2$$reg));
12411   %}
12412 
12413   ins_pipe(fp_dop_reg_reg_s);
12414 %}
12415 
12416 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12417   match(Set dst (SubD src1 src2));
12418 
12419   ins_cost(INSN_COST * 5);
12420   format %{ "fsubd   $dst, $src1, $src2" %}
12421 
12422   ins_encode %{
12423     __ fsubd(as_FloatRegister($dst$$reg),
12424              as_FloatRegister($src1$$reg),
12425              as_FloatRegister($src2$$reg));
12426   %}
12427 
12428   ins_pipe(fp_dop_reg_reg_d);
12429 %}
12430 
12431 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12432   match(Set dst (MulF src1 src2));
12433 
12434   ins_cost(INSN_COST * 6);
12435   format %{ "fmuls   $dst, $src1, $src2" %}
12436 
12437   ins_encode %{
12438     __ fmuls(as_FloatRegister($dst$$reg),
12439              as_FloatRegister($src1$$reg),
12440              as_FloatRegister($src2$$reg));
12441   %}
12442 
12443   ins_pipe(fp_dop_reg_reg_s);
12444 %}
12445 
12446 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12447   match(Set dst (MulD src1 src2));
12448 
12449   ins_cost(INSN_COST * 6);
12450   format %{ "fmuld   $dst, $src1, $src2" %}
12451 
12452   ins_encode %{
12453     __ fmuld(as_FloatRegister($dst$$reg),
12454              as_FloatRegister($src1$$reg),
12455              as_FloatRegister($src2$$reg));
12456   %}
12457 
12458   ins_pipe(fp_dop_reg_reg_d);
12459 %}
12460 
12461 // src1 * src2 + src3
12462 instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12463   predicate(UseFMA);
12464   match(Set dst (FmaF src3 (Binary src1 src2)));
12465 
12466   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12467 
12468   ins_encode %{
12469     __ fmadds(as_FloatRegister($dst$$reg),
12470              as_FloatRegister($src1$$reg),
12471              as_FloatRegister($src2$$reg),
12472              as_FloatRegister($src3$$reg));
12473   %}
12474 
12475   ins_pipe(pipe_class_default);
12476 %}
12477 
12478 // src1 * src2 + src3
12479 instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12480   predicate(UseFMA);
12481   match(Set dst (FmaD src3 (Binary src1 src2)));
12482 
12483   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12484 
12485   ins_encode %{
12486     __ fmaddd(as_FloatRegister($dst$$reg),
12487              as_FloatRegister($src1$$reg),
12488              as_FloatRegister($src2$$reg),
12489              as_FloatRegister($src3$$reg));
12490   %}
12491 
12492   ins_pipe(pipe_class_default);
12493 %}
12494 
// Fused multiply-subtract (FMSUB: dst = src3 - src1 * src2).  Two match
// rules capture the NegF/NegD on either multiplicand.
// -src1 * src2 + src3
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12530 
// Fused negated multiply-add (FNMADD: dst = -(src1 * src2) - src3).
// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12566 
12567 // src1 * src2 - src3
// Fused multiply with negated accumulator (FNMSUB: dst = src1 * src2 - src3).
// The unused immF0 operand has been dropped: it appeared in neither the
// match rule nor the encoding.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12583 
12584 // src1 * src2 - src3
// Fused multiply with negated accumulator (FNMSUB: dst = src1 * src2 - src3).
// The unused immD0 operand has been dropped: it appeared in neither the
// match rule nor the encoding.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd (the assembler declares the double
  // variant as fnmsub)
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12601 
12602 
// Single-precision FP divide: dst = src1 / src2.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: dst = src1 / src2.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12632 
// Single-precision FP negate: dst = -src.
// Format fixed to "fnegs" to match the emitted instruction (was "fneg",
// inconsistent with negD_reg_reg's "fnegd").
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12646 
// Double-precision FP negate: dst = -src.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12660 
// Single-precision FP absolute value: dst = |src|.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value: dst = |src|.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12686 
// Double-precision FP square root: dst = sqrt(src).
// Pipeline class corrected to fp_div_d (was fp_div_s, swapped with
// sqrtF_reg).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12699 
// Single-precision FP square root, matched from the double-rounding-safe
// pattern (float)sqrt((double)src).
// Pipeline class corrected to fp_div_s (was fp_div_d, swapped with
// sqrtD_reg).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12712 
12713 // ============================================================================
12714 // Logical Instructions
12715 
12716 // Integer Logical Instructions
12717 
12718 // And Instructions
12719 
12720 
// 32-bit bitwise AND, register-register: dst = src1 & src2.
// NOTE(review): cr is declared but never referenced by the match rule or
// encoding, and no effect(KILL cr) is given — presumably leftover; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12735 
// 32-bit bitwise AND with a logical immediate: dst = src1 & src2.
// Format fixed to "andw" — the encoding emits the non-flag-setting andw,
// not the flag-setting andsw the old format claimed.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12750 
// Or Instructions

// 32-bit bitwise OR, register-register: dst = src1 | src2.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR, register-register: dst = src1 ^ src2.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12814 
12815 // Long Logical Instructions
12816 // TODO
12817 
// 64-bit bitwise AND, register-register: dst = src1 & src2.
// Format comment fixed from "# int" to "# long" — this is the long form.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12832 
// 64-bit bitwise AND with a logical immediate.
// Format comment fixed from "# int" to "# long" — this is the long form.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12847 
12848 // Or Instructions
12849 
// 64-bit bitwise OR, register-register: dst = src1 | src2.
// Format comment fixed from "# int" to "# long" — this is the long form.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12864 
// 64-bit bitwise OR with a logical immediate.
// Format comment fixed from "# int" to "# long" — this is the long form.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12879 
12880 // Xor Instructions
12881 
// 64-bit bitwise XOR, register-register: dst = src1 ^ src2.
// Format comment fixed from "# int" to "# long" — this is the long form.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12896 
// 64-bit bitwise XOR with a logical immediate.
// Format comment fixed from "# int" to "# long"; format/ins_cost order
// normalized to match the sibling logical instructs.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12911 
// int -> long sign extension (sbfm 0,31 == sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// Unsigned int -> long zero extension (ubfm 0,31 == uxtw), matched from
// the (int & 0xFFFFFFFFL) idiom.
// this pattern occurs in bigmath arithmetic
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12950 
// int -> boolean: dst = (src != 0) ? 1 : 0.  Clobbers flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0.  Clobbers flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12986 
// double -> float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int conversion (round toward zero).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long conversion (round toward zero).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float conversion (signed scalar convert).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
13051 
// long -> float conversion (signed scalar convert).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int conversion (round toward zero).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long conversion (round toward zero).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double conversion (signed scalar convert).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double conversion (signed scalar convert).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13116 
13117 // stack <-> reg and reg <-> reg shuffles with no conversion
13118 
// Raw bit move, float stack slot -> int register (no value conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw bit move, int stack slot -> float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw bit move, double stack slot -> long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Raw bit move, long stack slot -> double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw bit move, float register -> int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Raw bit move, int register -> float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13226 
// Raw bit move, double register -> long stack slot.
// Format operand order fixed to "$src, $dst": the encoding stores $src to
// the stack slot $dst, matching the other *_reg_stack formats.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13244 
// Raw bit move, long register -> double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Raw bit move, float register -> int register (fmov, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Raw bit move, int register -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Raw bit move, double register -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Raw bit move, long register -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13334 
13335 // ============================================================================
13336 // clearing of an array
13337 
// Zero cnt words starting at base.  cnt and base are pinned to r11/r10
// (the zero_words stub's inputs) and are clobbered.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-length variant: only used when the word count is below the
// block-zeroing threshold, so zero_words inlines the stores.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13369 
13370 // ============================================================================
13371 // Overflow Math Instructions
13372 
// Overflow check for int add: cmn (adds to zr) sets V on signed overflow
// of op1 + op2; only the flags result is produced.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int add with an add/sub immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long add with an add/sub immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13424 
// Overflow check for int subtract: cmp sets V on signed overflow of
// op1 - op2; only the flags result is produced.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int subtract with an add/sub immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract with an add/sub immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    // subs with the zero register destination is the expansion of cmp
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13476 
// Overflow check for int negate, matched as 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negate, matched as 0 - op1.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13502 
// Overflow check for int multiply: do the multiply in 64 bits, then
// compare against the sign-extended 32-bit result; a mismatch means the
// product did not fit in an int.  The movw/cselw/cmpw tail converts that
// NE/EQ answer into the V flag that OverflowMulI consumers test.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused multiply-overflow-check + branch: when the flags feed directly
// into an overflow/no_overflow If, branch on NE/EQ and skip the V-flag
// materialization above.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13545 
// Overflow check for long multiply: the full 128-bit product fits in a
// long iff the high 64 bits (smulh) are the pure sign extension of the
// low 64 (mul).  The movw/cselw/cmpw tail converts that NE/EQ answer
// into the V flag that OverflowMulL consumers test.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused multiply-overflow-check + branch: when the flags feed directly
// into an overflow/no_overflow If, branch on NE/EQ and skip the V-flag
// materialization above.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13592 
13593 // ============================================================================
13594 // Compare Instructions
13595 
// Signed int compare, register-register: set flags from op1 - op2.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero (single cmpw-with-immediate form).
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate encodable in an add/sub
// instruction (single cmpw).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costs more
// because the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13651 
13652 // Unsigned compare Instructions; really, same as signed compare
13653 // except it should only be used to feed an If or a CMovI which takes a
13654 // cmpOpU.
13655 
// Unsigned int compare, register-register.  Same cmpw as the signed
// form; only the flags register class (rFlagsRegU) differs so the
// result feeds an unsigned cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may need to
// materialize the constant, hence double cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13711 
// Signed long compare, register-register (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may
// need a scratch register, hence double cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13767 
// Unsigned long compare, register-register.  Same 64-bit cmp as the
// signed form; the rFlagsRegU class routes the result to cmpOpU.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13823 
// Pointer compare, register-register (unsigned flags: addresses have
// no sign).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13851 
// Pointer null test: compare op1 against the NULL constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13879 
13880 // FP comparisons
13881 //
13882 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13883 // using normal cmpOp. See declaration of rFlagsReg for details.
13884 
// Float compare, register-register: fcmps sets the normal flags
// register, which is then tested with an ordinary cmpOp (see the
// "FP comparisons" note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13898 
// Float compare against the constant 0.0 (FCMP zero variant).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the former "0.0D" suffix is a non-standard
    // extension and is rejected by conforming C++ compilers.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13912 // FROM HERE
13913 
// Double compare, register-register; sets the normal flags register
// for consumption by an ordinary cmpOp.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13927 
// Double compare against the constant 0.0 (FCMP zero variant).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the former "0.0D" suffix is a non-standard
    // extension and is rejected by conforming C++ compilers.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13941 
// Three-way float compare: dst = -1 / 0 / +1 for less-or-unordered /
// equal / greater (Java fcmpl semantics: NaN compares as less).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed format string: the csinvw line was missing its closing ')'.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // The original declared and bound an unused "done" label here; it
    // was never branched to, so it has been removed.
  %}

  ins_pipe(pipe_class_default);

%}
13969 
// Three-way double compare: dst = -1 / 0 / +1 for less-or-unordered /
// equal / greater (Java dcmpl semantics: NaN compares as less).
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed format string: the csinvw line was missing its closing ')'.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // Unused "done" label removed (was bound but never branched to).
  %}
  ins_pipe(pipe_class_default);

%}
13996 
// Three-way float compare against constant 0.0: dst = -1 / 0 / +1 for
// less-or-unordered / equal / greater.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed format string: the csinvw line was missing its closing ')'.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the former "0.0D" suffix is a non-standard
    // extension and is rejected by conforming C++ compilers.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // Unused "done" label removed (was bound but never branched to).
  %}

  ins_pipe(pipe_class_default);

%}
14023 
// Three-way double compare against constant 0.0: dst = -1 / 0 / +1
// for less-or-unordered / equal / greater.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed format string: the csinvw line was missing its closing ')'.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the former "0.0D" suffix is a non-standard
    // extension and is rejected by conforming C++ compilers.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // Unused "done" label removed (was bound but never branched to).
  %}
  ins_pipe(pipe_class_default);

%}
14049 
// CmpLTMask: dst = (p < q) ? -1 : 0.  Materializes the signed
// less-than result as a full-width mask via cset + negate.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if p < q else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate: 0 - 1 = -1 (all ones), 0 - 0 = 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates
// the sign bit, giving -1 for negative src and 0 otherwise in a
// single instruction, with no flag usage.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14086 
14087 // ============================================================================
14088 // Max and Min
14089 
// Signed int minimum: dst = (src1 < src2) ? src1 : src2, via
// cmpw + conditional select.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 < src2 (signed), else src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14114 // FROM HERE
14115 
// Signed int maximum: dst = (src1 > src2) ? src1 : src2, via
// cmpw + conditional select.  Mirror of minI_rReg with GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 > src2 (signed), else src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14140 
14141 // ============================================================================
14142 // Branch Instructions
14143 
14144 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14158 
14159 // Conditional Near Branch
// Conditional near branch on signed flags.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional near branch on unsigned flags (cmpOpU condition codes).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14200 
14201 // Make use of CBZ and CBNZ.  These instructions, as well as being
14202 // shorter than (cmp; branch), have the additional benefit of not
14203 // killing the flags.
14204 
// Compare-int-with-zero fused with branch: cbzw/cbnzw.  Restricted to
// EQ/NE by the cmpOpEqNe operand; leaves the flags untouched.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-long-with-zero fused with branch: cbz/cbnz (64-bit form).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check fused with branch: cbz/cbnz on the full register.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compressed-oop null-check fused with branch: cbzw/cbnzw (32-bit).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14272 
// Null check of a DecodeN'd oop fused with branch: test the narrow
// oop directly with cbzw/cbnzw, skipping the decode (a narrow oop is
// zero iff its decoded pointer is null).
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14289 
// Unsigned int compare with zero fused with branch.  For unsigned x,
// x == 0 and x <= 0 coincide (and x != 0, x > 0 coincide), so EQ/LS
// map to cbzw and the remaining codes to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long variant of the above, using the 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14323 
14324 // Test bit and Branch
14325 
14326 // Patterns for short (< 32KiB) variants
// Sign test of a long fused with branch: x < 0 iff bit 63 is set, so
// LT becomes tbnz(bit 63) and GE becomes tbz(bit 63).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> NE (bit set, tbnz); GE -> EQ (bit clear, tbz).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int fused with branch: tests bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> NE (bit set, tbnz); GE -> EQ (bit clear, tbz).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long fused with branch: (x & 2^k) ==/!= 0
// becomes tbz/tbnz on bit k.  Predicate requires a power-of-two mask.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int fused with branch; int analogue of the
// previous rule.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14392 
14393 // And far variants
// Far variants of the sign/bit test-and-branch rules above: identical
// patterns but tbr is told to emit a long-range branch sequence for
// targets beyond the +/-32KiB tbz/tbnz reach (no ins_short_branch).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far sign test of an int (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of a long (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of an int (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14455 
14456 // Test bits
14457 
// Mask test setting flags: (op1 & op2) compared with 0 becomes a
// single tst.  Immediate form requires the mask to be encodable as a
// 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int mask test with a 32-bit logical-immediate mask (tstw).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Mask test with a register mask (long).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Mask test with a register mask (int).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14505 
14506 
14507 // Conditional Far Branch
14508 // Conditional Far Branch Unsigned
14509 // TODO: fixme
14510 
14511 // counted loop end branch near
// Counted loop back-branch, signed flags.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted loop back-branch, unsigned flags.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14544 
14545 // counted loop end branch far
14546 // counted loop end branch far unsigned
14547 // TODO: fixme
14548 
14549 // ============================================================================
14550 // inlined locking and unlocking
14551 
// Inlined monitor enter: emits the fast-lock sequence via the shared
// encoding; flags report success/failure to the following branch.
// tmp and tmp2 are scratch registers clobbered by the sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit: counterpart of cmpFastLock using the shared
// fast-unlock encoding; same scratch-register contract.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14579 
14580 
14581 // ============================================================================
14582 // Safepoint Instructions
14583 
14584 // TODO
14585 // provide a near and far version of this code
14586 
// Safepoint poll: load from the polling page; the VM arms the page to
// trap threads at safepoints (relocated as poll_type).
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14599 
14600 
14601 // ============================================================================
14602 // Procedure Call/Return Instructions
14603 
14604 // Call Java Static Instruction
14605 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14621 
14622 // TO HERE
14623 
14624 // Call Java Dynamic Instruction
// Dynamically-bound (virtual/interface) Java call through the inline
// cache.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14640 
14641 // Call Runtime Instruction
14642 
// Call into the VM runtime (may include a stub transition).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call (no safepoint, no stack walking); shares the same
// java-to-runtime encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call that does not use/preserve FP state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14691 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used on the exception path: branch to $jump_target with the
// exception oop pinned in R0 (see iRegP_R0 operand).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14721 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size: purely informs the register allocator that R0 is defined here.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14739 
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14752 
14753 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14766 
// Die now.
// Emits DCPS1 (debug change PE state) with an immediate that makes the
// crash site identifiable; executing it in normal state raises a fault.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dcps1(0xdead + 1);   // was mis-spelled "dpcs1"; the insn is DCPS1
  %}

  ins_pipe(pipe_class_default);
%}
14782 
14783 // ============================================================================
14784 // Partial Subtype Check
14785 //
14786 // superklass array for an instance of the superklass.  Set a hidden
14787 // internal cache on a hit (cache is checked with exposed code in
14788 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14789 // encoding ALSO sets flags.
14790 
14791 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14792 %{
14793   match(Set result (PartialSubtypeCheck sub super));
14794   effect(KILL cr, KILL temp);
14795 
14796   ins_cost(1100);  // slightly larger than the next version
14797   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14798 
14799   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14800 
14801   opcode(0x1); // Force zero of result reg on hit
14802 
14803   ins_pipe(pipe_class_memory);
14804 %}
14805 
14806 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
14807 %{
14808   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
14809   effect(KILL temp, KILL result);
14810 
14811   ins_cost(1100);  // slightly larger than the next version
14812   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
14813 
14814   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14815 
14816   opcode(0x0); // Don't zero result reg on hit
14817 
14818   ins_pipe(pipe_class_memory);
14819 %}
14820 
// String.compareTo intrinsic, both strings UTF-16 (UU encoding).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Format now lists both temps killed by the encoding, matching the
  // effect() line and the UL/LU variants below (was "# KILL $tmp1" only).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14838 
// String.compareTo intrinsic, both strings Latin-1 (LL encoding).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Format now lists both temps killed by the encoding, matching the
  // effect() line and the UL/LU variants below (was "# KILL $tmp1" only).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14855 
// String.compareTo intrinsic, mixed encodings (UL: presumably $str1 UTF-16,
// $str2 Latin-1 — confirm against StrIntrinsicNode).  Needs vector temps
// V0-V2 for the inflation of the Latin-1 side.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14875 
// String.compareTo intrinsic, mixed encodings (LU: mirror of the UL variant
// above).  Needs vector temps V0-V2.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);  // space added after ',' for consistency with UL
  %}
  ins_pipe(pipe_class_memory);
%}
14895 
// String.indexOf intrinsics.  The register variants pass -1 as the constant
// count, meaning the substring length is taken from $cnt2 at run time; the
// _con* variants below bake a small constant length (int_cnt2) into the code
// and pass zr for the register count.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Constant-length variants: immI_le_4 / immI_1 operands restrict matching to
// short substrings, letting string_indexof specialize the search.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15021 
// StringUTF16.indexOf(char) intrinsic: find the first occurrence of char
// $ch in the UTF-16 data at $str1 (length $cnt1); index (or -1) in $result.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15039 
// String.equals intrinsics.  The trailing integer passed to string_equals
// is the element size in bytes: 1 for Latin-1 (LL), 2 for UTF-16 (UU).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15071 
// Arrays.equals intrinsic for byte[] (element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // "$ary2" (was bare "ary2") so ADLC substitutes the operand in the
  // debug output instead of printing the literal text.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15088 
// Arrays.equals intrinsic for char[] (element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // "$ary2" (was bare "ary2") so ADLC substitutes the operand in the
  // debug output instead of printing the literal text.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15105 
// StringCoding.hasNegatives intrinsic: scan $len bytes at $ary1 for any
// byte with the sign bit set; boolean-style answer in $result.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15116 
// fast char[] to byte[] compression
// NOTE(review): the "KILL R1, R2, R3, R4" text does not match the operand
// registers (R1-R3, R0) declared here — confirm against the encoding.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15135 
// fast byte[] to char[] inflation
// Universe dummy: the copy has no value result, only memory side effects.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15150 
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15169 
15170 // ============================================================================
15171 // This name is KNOWN by the ADLC and cannot be changed.
15172 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15173 // for this guy.
15174 instruct tlsLoadP(thread_RegP dst)
15175 %{
15176   match(Set dst (ThreadLocal));
15177 
15178   ins_cost(0);
15179 
15180   format %{ " -- \t// $dst=Thread::current(), empty" %}
15181 
15182   size(0);
15183 
15184   ins_encode( /*empty*/ );
15185 
15186   ins_pipe(pipe_class_empty);
15187 %}
15188 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores use the S/D/Q FP register views for 32/64/128-bit
// accesses; the vmem4/8/16 operands constrain the addressing modes.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15256 
// ReplicateX rules: broadcast a scalar (register or immediate) into every
// lane of a D (64-bit) or X (128-bit) vector via NEON DUP/MOVI.

instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Mask to the lane width before broadcasting the immediate.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero vector: matched as ReplicateI of an immI0 (an all-zero 128-bit
// register is presumably the same for 2L and 4I lanes — hence the rule
// name); implemented by EOR-ing $dst with itself, no materialized zero.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15469 
// ====================REDUCTION ARITHMETIC====================================

// Int add reduction over 2 lanes: dst = src1 + src2[0] + src2[1].
// Lanes are extracted to GPRs with umov and summed with addw.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Int add reduction over 4 lanes: uses the SIMD across-lanes ADDV to sum
// the vector, then adds the scalar input.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Int multiply reduction over 2 lanes: dst = src1 * src2[0] * src2[1].
// No across-lanes multiply exists, so lanes are extracted and multiplied
// in GPRs.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Int multiply reduction over 4 lanes: first fold the high D half onto the
// low half with ins + a 2S vector multiply, then finish the remaining two
// partial products in GPRs.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    // tmp[D0] = src2[D1], i.e. bring lanes 2,3 down next to lanes 0,1.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15552 
// Float add reduction over 2 lanes: dst = src1 + src2[0] + src2[1].
// Strict left-to-right ordering is preserved (no pairwise tricks), as
// required for Java floating-point semantics.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1], then accumulate it.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Float add reduction over 4 lanes: serial chain of scalar fadds, one lane
// at a time, each lane brought to slot 0 of tmp with ins.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15604 
// Float multiply reduction over 2 lanes: dst = src1 * src2[0] * src2[1].
// Strict lane ordering is preserved for Java floating-point semantics.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    // dst = src1 * src2[0]
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1], then multiply it in.
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15624 
// Float multiply reduction over 4 lanes: serial chain of scalar fmuls,
// each lane brought to slot 0 of tmp with ins, preserving lane order.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15656 
// Double add reduction over 2 lanes: dst = src1 + src2[0] + src2[1],
// keeping strict lane order for Java floating-point semantics.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1], then accumulate it.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15676 
// Double multiply reduction over 2 lanes: dst = src1 * src2[0] * src2[1],
// keeping strict lane order for Java floating-point semantics.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    // dst = src1 * src2[0]
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1], then multiply it in.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15696 
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Byte vector add, 64-bit register. Also handles the 4-byte case — the
// upper unused lanes are harmless garbage.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Byte vector add, 128-bit register (16 lanes).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Short vector add, 64-bit register. Also covers the 2-short case.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Short vector add, 128-bit register (8 lanes).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Int vector add, 64-bit register (2 lanes).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Int vector add, 128-bit register (4 lanes).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Long vector add, 128-bit register (2 lanes).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Float vector add, 64-bit register (2 lanes).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Float vector add, 128-bit register (4 lanes).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15828 
// Double vector add, 128-bit register (2 lanes).
// Predicate added for consistency with vsub2D / vmul2D / vdiv2D, which all
// guard on a vector length of 2.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15841 
// --------------------------------- SUB --------------------------------------

// Byte vector subtract, 64-bit register. Also covers the 4-byte case.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Byte vector subtract, 128-bit register (16 lanes).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Short vector subtract, 64-bit register. Also covers the 2-short case.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Short vector subtract, 128-bit register (8 lanes).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Int vector subtract, 64-bit register (2 lanes).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Int vector subtract, 128-bit register (4 lanes).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Long vector subtract, 128-bit register (2 lanes).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Float vector subtract, 64-bit register (2 lanes).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Float vector subtract, 128-bit register (4 lanes).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Double vector subtract, 128-bit register (2 lanes).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15985 
// --------------------------------- MUL --------------------------------------

// Short vector multiply, 64-bit register. Also covers the 2-short case.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Short vector multiply, 128-bit register (8 lanes).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Int vector multiply, 64-bit register (2 lanes).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Int vector multiply, 128-bit register (4 lanes).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Float vector multiply, 64-bit register (2 lanes).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Float vector multiply, 128-bit register (4 lanes).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Double vector multiply, 128-bit register (2 lanes).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16086 
// --------------------------------- MLA --------------------------------------

// Integer multiply-accumulate: matches (AddV dst (MulV src1 src2)) and
// folds it into a single mla, which accumulates into dst.

// Short multiply-accumulate, 64-bit register. Also covers the 2-short case.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Short multiply-accumulate, 128-bit register (8 lanes).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Int multiply-accumulate, 64-bit register (2 lanes).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Int multiply-accumulate, 128-bit register (4 lanes).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// Fused float multiply-add (2 lanes); only matched when UseFMA allows
// fused rounding semantics.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
// Fused float multiply-add (4 lanes).
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
// Fused double multiply-add (2 lanes).
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16187 
// --------------------------------- MLS --------------------------------------

// Integer multiply-subtract: matches (SubV dst (MulV src1 src2)) and folds
// it into a single mls, which subtracts the product from dst.

// Short multiply-subtract, 64-bit register. Also covers the 2-short case.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Short multiply-subtract, 128-bit register (8 lanes).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Int multiply-subtract, 64-bit register (2 lanes).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Int multiply-subtract, 128-bit register (4 lanes).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// Fused float multiply-subtract (2 lanes); either operand of the product
// may carry the negation in the ideal graph.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// Fused float multiply-subtract (4 lanes).
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// Fused double multiply-subtract (2 lanes).
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16291 
// --------------------------------- DIV --------------------------------------

// Float vector divide, 64-bit register (2 lanes).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Float vector divide, 128-bit register (4 lanes).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Double vector divide, 128-bit register (2 lanes).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16335 
// --------------------------------- SQRT -------------------------------------

// Vector square root, 2 x double (FSQRT).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16349 
// --------------------------------- ABS --------------------------------------
// Vector floating-point absolute value (FABS).

// 2 x float, 64-bit vector.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// 4 x float, 128-bit vector.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// 2 x double, 128-bit vector.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16390 
// --------------------------------- NEG --------------------------------------
// Vector floating-point negate (FNEG).

// 2 x float, 64-bit vector.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// 4 x float, 128-bit vector.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// 2 x double, 128-bit vector.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16431 
// --------------------------------- AND --------------------------------------
// Vector bitwise AND. Lane type is irrelevant for bitwise ops, so the rules
// key on length_in_bytes only; the 8B rule also covers 4-byte vectors.

instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16462 
// --------------------------------- OR ---------------------------------------
// Vector bitwise OR; 8B rule also covers 4-byte vectors.

instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed: the format string previously said "and" (copy-paste from vand8B),
  // which made -XX:+PrintOptoAssembly output misleading. The emitted
  // instruction is orr, matching vor16B below.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16479 
// Vector bitwise OR, 16-byte vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16493 
// --------------------------------- XOR --------------------------------------
// Vector bitwise XOR (emitted as AArch64 eor); 8B rule also covers 4-byte
// vectors.

instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16524 
// ------------------------------ Shift ---------------------------------------
// Materialize a scalar shift count as a vector by replicating it into every
// byte lane (dup). The same rule serves both left- and right-shift counts.

instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16547 
// Vector left shift of bytes by a vector shift count (sshl with positive
// counts). The 8B rule also covers 4-element byte vectors.

instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16574 
16575 // Right shifts with vector shift count on aarch64 SIMD are implemented
16576 // as left shift by negative shift count.
16577 // There are two cases for vector shift count.
16578 //
16579 // Case 1: The vector shift count is from replication.
16580 //        |            |
16581 //    LoadVector  RShiftCntV
16582 //        |       /
16583 //     RShiftVI
// Note: In the inner loop, multiple neg instructions are generated; they can
// be hoisted to the outer loop and merged into a single neg instruction.
16586 //
16587 // Case 2: The vector shift count is from loading.
16588 // This case isn't supported by middle-end now. But it's supported by
16589 // panama/vectorIntrinsics(JEP 338: Vector API).
16590 //        |            |
16591 //    LoadVector  LoadVector
16592 //        |       /
16593 //     RShiftVI
16594 //
16595 
// Signed right shift of bytes by a vector count: negate the count vector
// (negr into a TEMP), then left-shift by the negated count (sshl).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Unsigned (logical) right shift of bytes: same negate-then-shift scheme,
// using ushl instead of sshl.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16665 
// Byte shifts by an immediate count. The shl/ushr immediate fields only
// encode counts 0..7, so out-of-range counts are handled explicitly:
// left and logical-right shifts of >= 8 produce all-zero lanes
// (eor dst,src,src); arithmetic-right shifts clamp the count to 7 so the
// sign bit fills the lane.

instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Count >= lane width: result is zero.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Arithmetic shift: clamp to 7 so every lane becomes its sign fill.
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16772 
// Short (16-bit lane) shifts by a vector count. Right shifts use the
// negate-then-sshl/ushl scheme described above. The negr is done on byte
// lanes (T8B/T16B) regardless of the data lane size; this is sufficient
// because sshl/ushl take the shift amount from the low byte of each
// element (see the Arm A64 ISA description of SSHL/USHL) — TODO confirm
// against the assembler if touched.

instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16869 
// Short (16-bit lane) shifts by an immediate count. As with the byte
// variants: counts >= 16 zero the result for left/logical-right shifts,
// and are clamped to 15 for arithmetic-right shifts.

instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16976 
// Int (32-bit lane) shifts by a vector count. Right shifts again use the
// negate-then-sshl/ushl scheme with a TEMP register for the negated count.

instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17070 
// Int (32-bit lane) shifts by an immediate count. Unlike the byte/short
// variants there is no out-of-range handling here; the immediate is passed
// straight to the encoder — presumably the matcher only produces counts in
// range for 32-bit lanes (NOTE(review): confirm against the ideal-graph
// shift-count masking before relying on this).

instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}

instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17148 
17149 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
17150   predicate(n->as_Vector()->length() == 2);
17151   match(Set dst (LShiftVL src shift));
17152   ins_cost(INSN_COST);
17153   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
17154   ins_encode %{
17155     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
17156             as_FloatRegister($src$$reg),
17157             as_FloatRegister($shift$$reg));
17158   %}
17159   ins_pipe(vshift128);
17160 %}
17161 
17162 instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
17163   predicate(n->as_Vector()->length() == 2);
17164   match(Set dst (RShiftVL src shift));
17165   ins_cost(INSN_COST);
17166   effect(TEMP tmp);
17167   format %{ "negr  $tmp,$shift\t"
17168             "sshl  $dst,$src,$tmp\t# vector (2D)" %}
17169   ins_encode %{
17170     __ negr(as_FloatRegister($tmp$$reg), __ T16B,
17171             as_FloatRegister($shift$$reg));
17172     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
17173             as_FloatRegister($src$$reg),
17174             as_FloatRegister($tmp$$reg));
17175   %}
17176   ins_pipe(vshift128);
17177 %}
17178 
// Logical (unsigned) right shift of two 64-bit long lanes by a per-lane
// variable count. Same negate-then-shift-left trick as vsra2L, but with
// USHL so the right shift is zero-filling rather than sign-extending.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    // Byte-wise negation (T16B): USHL reads only the low byte of each
    // element as the shift count.
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17195 
// Left shift of two 64-bit long lanes by an immediate count
// (2D arrangement).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // SHL shifts each element left by the immediate.
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17208 
// Arithmetic (signed) right shift of two 64-bit long lanes by an
// immediate count (2D arrangement).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // SSHR shifts each signed element right, replicating the sign bit.
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17221 
// Logical (unsigned) right shift of two 64-bit long lanes by an
// immediate count (2D arrangement).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // USHR shifts each unsigned element right, filling with zeros.
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17234 
17235 //----------PEEPHOLE RULES-----------------------------------------------------
17236 // These must follow all instruction definitions as they use the names
17237 // defined in the instructions definitions.
17238 //
17239 // peepmatch ( root_instr_name [preceding_instruction]* );
17240 //
17241 // peepconstraint %{
17242 // (instruction_number.operand_name relational_op instruction_number.operand_name
17243 //  [, ...] );
17244 // // instruction numbers are zero-based using left to right order in peepmatch
17245 //
17246 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17247 // // provide an instruction_number.operand_name for each operand that appears
17248 // // in the replacement instruction's match rule
17249 //
17250 // ---------VM FLAGS---------------------------------------------------------
17251 //
17252 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17253 //
17254 // Each peephole rule is given an identifying number starting with zero and
17255 // increasing by one in the order seen by the parser.  An individual peephole
17256 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17257 // on the command-line.
17258 //
17259 // ---------CURRENT LIMITATIONS----------------------------------------------
17260 //
17261 // Only match adjacent instructions in same basic block
17262 // Only equality constraints
17263 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17264 // Only one replacement instruction
17265 //
17266 // ---------EXAMPLE----------------------------------------------------------
17267 //
17268 // // pertinent parts of existing instructions in architecture description
17269 // instruct movI(iRegINoSp dst, iRegI src)
17270 // %{
17271 //   match(Set dst (CopyI src));
17272 // %}
17273 //
17274 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17275 // %{
17276 //   match(Set dst (AddI dst src));
17277 //   effect(KILL cr);
17278 // %}
17279 //
17280 // // Change (inc mov) to lea
17281 // peephole %{
//   // increment preceded by register-register move
17283 //   peepmatch ( incI_iReg movI );
17284 //   // require that the destination register of the increment
17285 //   // match the destination register of the move
17286 //   peepconstraint ( 0.dst == 1.dst );
17287 //   // construct a replacement instruction that sets
17288 //   // the destination to ( move's source register + one )
17289 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17290 // %}
17291 //
17292 
17293 // Implementation no longer uses movX instructions since
17294 // machine-independent system no longer uses CopyX nodes.
17295 //
17296 // peephole
17297 // %{
17298 //   peepmatch (incI_iReg movI);
17299 //   peepconstraint (0.dst == 1.dst);
17300 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17301 // %}
17302 
17303 // peephole
17304 // %{
17305 //   peepmatch (decI_iReg movI);
17306 //   peepconstraint (0.dst == 1.dst);
17307 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17308 // %}
17309 
17310 // peephole
17311 // %{
17312 //   peepmatch (addI_iReg_imm movI);
17313 //   peepconstraint (0.dst == 1.dst);
17314 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17315 // %}
17316 
17317 // peephole
17318 // %{
17319 //   peepmatch (incL_iReg movL);
17320 //   peepconstraint (0.dst == 1.dst);
17321 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17322 // %}
17323 
17324 // peephole
17325 // %{
17326 //   peepmatch (decL_iReg movL);
17327 //   peepconstraint (0.dst == 1.dst);
17328 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17329 // %}
17330 
17331 // peephole
17332 // %{
17333 //   peepmatch (addL_iReg_imm movL);
17334 //   peepconstraint (0.dst == 1.dst);
17335 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17336 // %}
17337 
17338 // peephole
17339 // %{
17340 //   peepmatch (addP_iReg_imm movP);
17341 //   peepconstraint (0.dst == 1.dst);
17342 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17343 // %}
17344 
17345 // // Change load of spilled value to only a spill
17346 // instruct storeI(memory mem, iRegI src)
17347 // %{
17348 //   match(Set mem (StoreI mem src));
17349 // %}
17350 //
17351 // instruct loadI(iRegINoSp dst, memory mem)
17352 // %{
17353 //   match(Set dst (LoadI mem));
17354 // %}
17355 //
17356 
17357 //----------SMARTSPILL RULES---------------------------------------------------
17358 // These must follow all instruction definitions as they use the names
17359 // defined in the instructions definitions.
17360 
17361 // Local Variables:
17362 // mode: c++
17363 // End: