1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// follow the C1 compiler in making registers
//
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
//

// General Registers

reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers

// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// AArch64 has 32 floating-point registers. Each can store a vector of
// single or double precision floating-point values up to 8 * 32
// floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
// use the first float or double element of the vector.

// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.

// The _H, _J and _K slots name the second, third and fourth 32-bit
// quarters of each 128-bit vector register (VMReg next(), next(2),
// next(3)), as required by the allocator for 64/128-bit values.

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));

// ----------------------------
// Special Registers
// ----------------------------

// the AArch64 CSPR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).

reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());


// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

alloc_class chunk2(RFLAGS);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Class for all long integer registers (including RSP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non-special integer registers
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Same as no_special_reg32_no_fp but additionally allows allocation of
// R29 (fp); selected dynamically below when !PreserveFramePointer.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});

// Class for all non-special long integer registers
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Same as no_special_reg_no_fp but additionally allows allocation of
// R29 (fp); selected dynamically below when !PreserveFramePointer.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Class for all float registers
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 64bit vector registers
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);

%}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are twice as expensive as a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are the costliest operations ranked here.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------
  // Call trampoline stubs are not used on this platform, so both
  // queries report zero.

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
class HandlerImpl {

 public:

  // emit the exception/deopt handler code into cbuf; definitions live
  // elsewhere in this file
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
1039  bool is_CAS(int opcode, bool maybe_volatile);
1040 
1041   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1042 
1043   bool unnecessary_acquire(const Node *barrier);
1044   bool needs_acquiring_load(const Node *load);
1045 
1046   // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1047 
1048   bool unnecessary_release(const Node *barrier);
1049   bool unnecessary_volatile(const Node *barrier);
1050   bool needs_releasing_store(const Node *store);
1051 
1052   // predicate controlling translation of CompareAndSwapX
1053   bool needs_acquiring_load_exclusive(const Node *load);
1054 
1055   // predicate controlling translation of StoreCM
1056   bool unnecessary_storestore(const Node *storecm);
1057 
1058   // predicate controlling addressing modes
1059   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
  // Optimization of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
1134   // sequences which i) occur as a translation of a volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
1262   // is_CAS(int opcode, bool maybe_volatile)
1263   //
1264   // return true if opcode is one of the possible CompareAndSwapX
1265   // values otherwise false.
1266 
1267   bool is_CAS(int opcode, bool maybe_volatile)
1268   {
1269     switch(opcode) {
1270       // We handle these
1271     case Op_CompareAndSwapI:
1272     case Op_CompareAndSwapL:
1273     case Op_CompareAndSwapP:
1274     case Op_CompareAndSwapN:


1275     case Op_CompareAndSwapB:
1276     case Op_CompareAndSwapS:
1277     case Op_GetAndSetI:
1278     case Op_GetAndSetL:
1279     case Op_GetAndSetP:
1280     case Op_GetAndSetN:
1281     case Op_GetAndAddI:
1282     case Op_GetAndAddL:
1283       return true;
1284     case Op_CompareAndExchangeI:
1285     case Op_CompareAndExchangeN:
1286     case Op_CompareAndExchangeB:
1287     case Op_CompareAndExchangeS:
1288     case Op_CompareAndExchangeL:
1289     case Op_CompareAndExchangeP:
1290     case Op_WeakCompareAndSwapB:
1291     case Op_WeakCompareAndSwapS:
1292     case Op_WeakCompareAndSwapI:
1293     case Op_WeakCompareAndSwapL:
1294     case Op_WeakCompareAndSwapP:
1295     case Op_WeakCompareAndSwapN:




1296       return maybe_volatile;
1297     default:
1298       return false;
1299     }
1300   }
1301 
1302   // helper to determine the maximum number of Phi nodes we may need to
1303   // traverse when searching from a card mark membar for the merge mem
1304   // feeding a trailing membar or vice versa
1305 
1306 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1307 
// Returns true when the given acquire membar can be elided because the
// preceding load (or CAS) will itself be emitted in an acquiring form
// (ldar<x>/ldaxr<x>), making the trailing dmb redundant -- see the
// discussion at the top of this source block.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // trailing membar of a volatile load: the load is emitted as ldar<x>
  if (mb->trailing_load()) {
    return true;
  }

  // trailing membar of a CAS/atomic load-store: elide only for the
  // opcodes is_CAS accepts (weak/exchange forms included, hence true)
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1331 
1332 bool needs_acquiring_load(const Node *n)
1333 {
1334   assert(n->is_Load(), "expecting a load");
1335   if (UseBarriersForVolatile) {
1336     // we use a normal load and a dmb
1337     return false;
1338   }
1339 
1340   LoadNode *ld = n->as_Load();
1341 
1342   return ld->is_acquire();
1343 }
1344 
// Returns true when a leading MemBarRelease can be elided because the
// associated store/CAS will be emitted in a releasing form
// (stlr<x>/stlxr<x>) -- see the discussion at the top of this block.
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *barrier = n->as_MemBar();
  if (!barrier->leading()) {
    // not part of a leading/trailing membar pair: keep the dmb
    return false;
  } else {
    // sanity-check the leading/trailing pairing established by the
    // ideal graph before deciding
    Node* trailing = barrier->trailing_membar();
    MemBarNode* trailing_mb = trailing->as_MemBar();
    assert(trailing_mb->trailing(), "Not a trailing membar?");
    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");

    // the trailing membar's precedent input identifies the guarded access
    Node* mem = trailing_mb->in(MemBarNode::Precedent);
    if (mem->is_Store()) {
      // volatile store pattern: store will be emitted as stlr<x>
      assert(mem->as_Store()->is_release(), "");
      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
      return true;
    } else {
      // CAS pattern: elide only for opcodes is_CAS accepts
      assert(mem->is_LoadStore(), "");
      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
      return is_CAS(mem->Opcode(), true);
    }
  }
  // unreachable: both branches above return
  return false;
}
1378 
// Returns true when a trailing MemBarVolatile can be elided because the
// store it guards will be emitted as a releasing stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // elide only when this membar trails a releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // verify the membar is correctly paired with its leading MemBarRelease
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1402 
1403 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1404 
1405 bool needs_releasing_store(const Node *n)
1406 {
1407   // assert n->is_Store();
1408   if (UseBarriersForVolatile) {
1409     // we use a normal store and dmb combination
1410     return false;
1411   }
1412 
1413   StoreNode *st = n->as_Store();
1414 
1415   return st->trailing_membar() != NULL;
1416 }
1417 
1418 // predicate controlling translation of CAS
1419 //
1420 // returns true if CAS needs to use an acquiring load otherwise false
1421 
1422 bool needs_acquiring_load_exclusive(const Node *n)
1423 {
1424   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1425   if (UseBarriersForVolatile) {
1426     return false;
1427   }
1428 
1429   LoadStoreNode* ldst = n->as_LoadStore();
1430   if (is_CAS(n->Opcode(), false)) {
1431     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1432   } else {
1433     return ldst->trailing_membar() != NULL;
1434   }
1435 
1436   // so we can just return true here
1437   return true;
1438 }
1439 
1440 // predicate controlling translation of StoreCM
1441 //
1442 // returns true if a StoreStore must precede the card write otherwise
1443 // false
1444 
1445 bool unnecessary_storestore(const Node *storecm)
1446 {
1447   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1448 
1449   // we need to generate a dmb ishst between an object put and the
1450   // associated card mark when we are using CMS without conditional
1451   // card marking
1452 
1453   if (UseConcMarkSweepGC && !UseCondCardMark) {
1454     return false;
1455   }
1456 
1457   // a storestore is unnecesary in all other cases
1458 
1459   return true;
1460 }
1461 
1462 
1463 #define __ _masm.
1464 
1465 // advance declarations for helper functions to convert register
1466 // indices to register objects
1467 
1468 // the ad file has to provide implementations of certain methods
1469 // expected by the generic code
1470 //
1471 // REQUIRED FUNCTIONALITY
1472 
1473 //=============================================================================
1474 
1475 // !!!!! Special hack to get all types of calls to specify the byte offset
1476 //       from the start of the call to the point where the return address
1477 //       will point.
1478 
1479 int MachCallStaticJavaNode::ret_addr_offset()
1480 {
1481   // call should be a simple bl
1482   int off = 4;
1483   return off;
1484 }
1485 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four instructions of 4 bytes each precede the return address
  return 16; // movz, movk, movk, bl
}
1490 
1491 int MachCallRuntimeNode::ret_addr_offset() {
1492   // for generated stubs the call will be
1493   //   far_call(addr)
1494   // for real runtime callouts it will be six instructions
1495   // see aarch64_enc_java_to_runtime
1496   //   adr(rscratch2, retaddr)
1497   //   lea(rscratch1, RuntimeAddress(addr)
1498   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1499   //   blrt rscratch1
1500   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1501   if (cb) {
1502     return MacroAssembler::far_branch_size();
1503   } else {
1504     return 6 * NativeInstruction::instruction_size;
1505   }
1506 }
1507 
1508 // Indicate if the safepoint node needs the polling page as an input
1509 
1510 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1512 // instruction itself. so we cannot plant a mov of the safepoint poll
1513 // address followed by a load. setting this to true means the mov is
1514 // scheduled as a prior instruction. that's better for scheduling
1515 // anyway.
1516 
bool SafePointNode::needs_polling_address_input()
{
  // see the comment above: the poll-address mov must be a separately
  // schedulable instruction preceding the poll load
  return true;
}
1521 
1522 //=============================================================================
1523 
1524 #ifndef PRODUCT
// debug listing only
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
1528 #endif
1529 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // a breakpoint is a single brk #0 instruction
  __ brk(0);
}
1534 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation
  return MachNode::size(ra_);
}
1538 
1539 //=============================================================================
1540 
1541 #ifndef PRODUCT
  // debug listing only
  // n.b. _count is the number of nop instructions (see size() below)
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
1545 #endif
1546 
1547   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1548     MacroAssembler _masm(&cbuf);
1549     for (int i = 0; i < _count; i++) {
1550       __ nop();
1551     }
1552   }
1553 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // _count nops, each NativeInstruction::instruction_size bytes
    return _count * NativeInstruction::instruction_size;
  }
1557 
1558 //=============================================================================
// The constant table is addressed absolutely (see
// calculate_table_base_offset below), so no base register is produced.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1560 
int Compile::ConstantTable::calculate_table_base_offset() const {
  // the table base is used as-is; no bias is applied on this platform
  return 0;  // absolute addressing, no offset
}
1564 
// post-register-allocation expansion is not used for the constant base
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called since requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
1569 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding: this node emits no instructions (size() returns 0)
}
1573 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // matches the empty encoding in emit()
  return 0;
}
1577 
1578 #ifndef PRODUCT
// debug listing only
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
1582 #endif
1583 
1584 #ifndef PRODUCT
// debug listing of the frame-building sequence emitted by the prolog
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames: single sub then stp of rfp/lr at the frame top;
  // the threshold presumably tracks the stp immediate range -- confirm
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frames: push rfp/lr first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
1604 #endif
1605 
// Emit the method prolog: a patchable nop, an optional stack-overflow
// bang, the frame build, simulator notification and constant-table
// base offset setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1641 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
1647 
int MachPrologNode::reloc() const
{
  // the prolog emits no relocatable values
  return 0;
}
1652 
1653 //=============================================================================
1654 
1655 #ifndef PRODUCT
// debug listing of the frame-teardown and return-poll sequence
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frames: reload lr/rfp from the frame top, then pop
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frames: raise sp via rscratch1 before popping lr/rfp
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
1679 #endif
1680 
// Emit the method epilog: frame teardown, simulator notification,
// reserved-stack check and the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  // tear down the frame (see format() above for the expanded sequence)
  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // safepoint poll on method return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1700 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
1705 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
1710 
// use the generic pipeline class for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1714 
1715 // This method seems to be obsolete. It is declared in machnode.hpp
1716 // and defined in all *.ad files, but it is never called. Should we
1717 // get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  // one instruction past the start of the epilog
  return 4;
}
1722 
1723 //=============================================================================
1724 
1725 // Figure out which register class each belongs in: rc_int, rc_float or
1726 // rc_stack.
1727 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1728 
// Map an allocator register index onto its spill class: general
// purpose, float/vector, stack slot, or bad.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots each (Vn, Vn_H, Vn_J, Vn_K --
  // see vectorx_reg above), i.e. 128 allocator slots
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1752 
1753 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1754   Compile* C = ra_->C;
1755 
1756   // Get registers to move.
1757   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1758   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1759   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1760   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1761 
1762   enum RC src_hi_rc = rc_class(src_hi);
1763   enum RC src_lo_rc = rc_class(src_lo);
1764   enum RC dst_hi_rc = rc_class(dst_hi);
1765   enum RC dst_lo_rc = rc_class(dst_lo);
1766 
1767   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1768 
1769   if (src_hi != OptoReg::Bad) {
1770     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1771            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1772            "expected aligned-adjacent pairs");
1773   }
1774 
1775   if (src_lo == dst_lo && src_hi == dst_hi) {
1776     return 0;            // Self copy, no move.
1777   }
1778 
1779   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1780               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1781   int src_offset = ra_->reg2offset(src_lo);
1782   int dst_offset = ra_->reg2offset(dst_lo);
1783 
1784   if (bottom_type()->isa_vect() != NULL) {
1785     uint ireg = ideal_reg();
1786     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1787     if (cbuf) {
1788       MacroAssembler _masm(cbuf);
1789       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1790       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1791         // stack->stack
1792         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1793         if (ireg == Op_VecD) {
1794           __ unspill(rscratch1, true, src_offset);
1795           __ spill(rscratch1, true, dst_offset);
1796         } else {
1797           __ spill_copy128(src_offset, dst_offset);
1798         }
1799       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1800         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1801                ireg == Op_VecD ? __ T8B : __ T16B,
1802                as_FloatRegister(Matcher::_regEncode[src_lo]));
1803       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1804         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1805                        ireg == Op_VecD ? __ D : __ Q,
1806                        ra_->reg2offset(dst_lo));
1807       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1808         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1809                        ireg == Op_VecD ? __ D : __ Q,
1810                        ra_->reg2offset(src_lo));
1811       } else {
1812         ShouldNotReachHere();
1813       }
1814     }
1815   } else if (cbuf) {
1816     MacroAssembler _masm(cbuf);
1817     switch (src_lo_rc) {
1818     case rc_int:
1819       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1820         if (is64) {
1821             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1822                    as_Register(Matcher::_regEncode[src_lo]));
1823         } else {
1824             MacroAssembler _masm(cbuf);
1825             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1826                     as_Register(Matcher::_regEncode[src_lo]));
1827         }
1828       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1829         if (is64) {
1830             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1831                      as_Register(Matcher::_regEncode[src_lo]));
1832         } else {
1833             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1834                      as_Register(Matcher::_regEncode[src_lo]));
1835         }
1836       } else {                    // gpr --> stack spill
1837         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1838         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1839       }
1840       break;
1841     case rc_float:
1842       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1843         if (is64) {
1844             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1845                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1846         } else {
1847             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1848                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1849         }
1850       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1851           if (cbuf) {
1852             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1853                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1854         } else {
1855             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1856                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1857         }
1858       } else {                    // fpr --> stack spill
1859         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1860         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1861                  is64 ? __ D : __ S, dst_offset);
1862       }
1863       break;
1864     case rc_stack:
1865       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1866         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1867       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1868         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1869                    is64 ? __ D : __ S, src_offset);
1870       } else {                    // stack --> stack copy
1871         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1872         __ unspill(rscratch1, is64, src_offset);
1873         __ spill(rscratch1, is64, dst_offset);
1874       }
1875       break;
1876     default:
1877       assert(false, "bad rc_class for spill");
1878       ShouldNotReachHere();
1879     }
1880   }
1881 
1882   if (st) {
1883     st->print("spill ");
1884     if (src_lo_rc == rc_stack) {
1885       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1886     } else {
1887       st->print("%s -> ", Matcher::regName[src_lo]);
1888     }
1889     if (dst_lo_rc == rc_stack) {
1890       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1891     } else {
1892       st->print("%s", Matcher::regName[dst_lo]);
1893     }
1894     if (bottom_type()->isa_vect() != NULL) {
1895       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1896     } else {
1897       st->print("\t# spill size = %d", is64 ? 64:32);
1898     }
1899   }
1900 
1901   return 0;
1902 
1903 }
1904 
1905 #ifndef PRODUCT
1906 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1907   if (!ra_)
1908     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1909   else
1910     implementation(NULL, ra_, false, st);
1911 }
1912 #endif
1913 
// Emit the spill-copy instructions into cbuf.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1917 
// Size in bytes of the emitted spill copy; measured generically.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1921 
1922 //=============================================================================
1923 
1924 #ifndef PRODUCT
1925 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1926   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1927   int reg = ra_->get_reg_first(this);
1928   st->print("add %s, rsp, #%d]\t# box lock",
1929             Matcher::regName[reg], offset);
1930 }
1931 #endif
1932 
// Materialize the address of the lock box: dst = sp + offset.
// Only offsets encodable as an add/sub immediate are supported; the
// node's size() is hard-wired to a single 4-byte instruction, so a
// multi-instruction sequence must not be emitted here.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
1945 
// A box lock is always exactly one instruction (see emit() above).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
1950 
1951 //=============================================================================
1952 
1953 #ifndef PRODUCT
1954 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1955 {
1956   st->print_cr("# MachUEPNode");
1957   if (UseCompressedClassPointers) {
1958     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1959     if (Universe::narrow_klass_shift() != 0) {
1960       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
1961     }
1962   } else {
1963    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1964   }
1965   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
1966   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
1967 }
1968 #endif
1969 
1970 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1971 {
1972   // This is the unverified entry point.
1973   MacroAssembler _masm(&cbuf);
1974 
1975   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
1976   Label skip;
1977   // TODO
1978   // can we avoid this skip and still use a reloc?
1979   __ br(Assembler::EQ, skip);
1980   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1981   __ bind(skip);
1982 }
1983 
// Size of the unverified entry point sequence; measured generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1988 
1989 // REQUIRED EMIT CODE
1990 
1991 //=============================================================================
1992 
1993 // Emit exception handler code.
1994 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
1995 {
1996   // mov rscratch1 #exception_blob_entry_point
1997   // br rscratch1
1998   // Note that the code buffer's insts_mark is always relative to insts.
1999   // That's why we must use the macroassembler to generate a handler.
2000   MacroAssembler _masm(&cbuf);
2001   address base = __ start_a_stub(size_exception_handler());
2002   if (base == NULL) {
2003     ciEnv::current()->record_failure("CodeCache is full");
2004     return 0;  // CodeBuffer::expand failed
2005   }
2006   int offset = __ offset();
2007   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
2008   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
2009   __ end_a_stub();
2010   return offset;
2011 }
2012 
2013 // Emit deopt handler code.
2014 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
2015 {
2016   // Note that the code buffer's insts_mark is always relative to insts.
2017   // That's why we must use the macroassembler to generate a handler.
2018   MacroAssembler _masm(&cbuf);
2019   address base = __ start_a_stub(size_deopt_handler());
2020   if (base == NULL) {
2021     ciEnv::current()->record_failure("CodeCache is full");
2022     return 0;  // CodeBuffer::expand failed
2023   }
2024   int offset = __ offset();
2025 
2026   __ adr(lr, __ pc());
2027   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
2028 
2029   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
2030   __ end_a_stub();
2031   return offset;
2032 }
2033 
2034 // REQUIRED MATCHER CODE
2035 
2036 //=============================================================================
2037 
2038 const bool Matcher::match_rule_supported(int opcode) {
2039 
2040   switch (opcode) {
2041   default:
2042     break;
2043   }
2044 
2045   if (!has_match_rule(opcode)) {
2046     return false;
2047   }
2048 
2049   return true;  // Per default match rules are supported.
2050 }
2051 
2052 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2053 
2054   // TODO
2055   // identify extra cases that we might want to provide match rules for
2056   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2057   bool ret_value = match_rule_supported(opcode);
2058   // Add rules here.
2059 
2060   return ret_value;  // Per default match rules are supported.
2061 }
2062 
// This target does not support predicated (masked) vector operations.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the default register-pressure threshold for float registers.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2070 
// Not used on aarch64; any call is an error.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2076 
2077 // Is this branch offset short enough that a short branch can be used?
2078 //
2079 // NOTE: If the platform does not provide any short branch variants, then
2080 //       this method should return false for offset 0.
2081 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2082   // The passed offset is relative to address of the branch.
2083 
2084   return (-32768 <= offset && offset < 32768);
2085 }
2086 
2087 const bool Matcher::isSimpleConstant64(jlong value) {
2088   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
2089   // Probably always true, even if a temp register is required.
2090   return true;
2091 }
2092 
2093 // true just means we have fast l2f conversion
2094 const bool Matcher::convL2FSupported(void) {
2095   return true;
2096 }
2097 
2098 // Vector width in bytes.
2099 const int Matcher::vector_width_in_bytes(BasicType bt) {
2100   int size = MIN2(16,(int)MaxVectorSize);
2101   // Minimum 2 values in vector
2102   if (size < 2*type2aelembytes(bt)) size = 0;
2103   // But never < 4
2104   if (size < 4) size = 0;
2105   return size;
2106 }
2107 
2108 // Limits on vector size (number of elements) loaded into vector.
2109 const int Matcher::max_vector_size(const BasicType bt) {
2110   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2111 }
2112 const int Matcher::min_vector_size(const BasicType bt) {
2113 //  For the moment limit the vector size to 8 bytes
2114     int size = 8 / type2aelembytes(bt);
2115     if (size < 2) size = 2;
2116     return size;
2117 }
2118 
2119 // Vector ideal reg.
2120 const uint Matcher::vector_ideal_reg(int len) {
2121   switch(len) {
2122     case  8: return Op_VecD;
2123     case 16: return Op_VecX;
2124   }
2125   ShouldNotReachHere();
2126   return 0;
2127 }
2128 
// Vector shift counts always live in a full 128-bit vector register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// aarch64 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2142 
2143 // false => size gets scaled to BytesPerLong, ok.
2144 const bool Matcher::init_array_count_is_in_bytes = false;
2145 
2146 // Use conditional move (CMOVL)
2147 const int Matcher::long_cmove_cost() {
2148   // long cmoves are no more expensive than int cmoves
2149   return 0;
2150 }
2151 
2152 const int Matcher::float_cmove_cost() {
2153   // float cmoves are no more expensive than int cmoves
2154   return 0;
2155 }
2156 
2157 // Does the CPU require late expand (see block.cpp for description of late expand)?
2158 const bool Matcher::require_postalloc_expand = false;
2159 
2160 // Do we need to mask the count passed to shift instructions or does
2161 // the cpu only look at the lower 5/6 bits anyway?
2162 const bool Matcher::need_masked_shift_count = false;
2163 
2164 // This affects two different things:
2165 //  - how Decode nodes are matched
2166 //  - how ImplicitNullCheck opportunities are recognized
2167 // If true, the matcher will try to remove all Decodes and match them
2168 // (as operands) into nodes. NullChecks are not prepared to deal with
2169 // Decodes by final_graph_reshaping().
2170 // If false, final_graph_reshaping() forces the decode behind the Cmp
2171 // for a NullCheck. The matcher matches the Decode node into a register.
2172 // Implicit_null_check optimization moves the Decode along with the
2173 // memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only fold the decode into the addressing mode when no shift is
  // needed (heap-based decoding with zero shift).
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2193 
2194 // Is it better to copy float constants, or load them directly from
2195 // memory?  Intel can load a float constant from a direct address,
2196 // requiring no extra registers.  Most RISCs will have to materialize
2197 // an address into a register first, so they would do better to copy
2198 // the constant from stack.
2199 const bool Matcher::rematerialize_float_constants = false;
2200 
2201 // If CPU can load and store mis-aligned doubles directly then no
2202 // fixup is needed.  Else we split the double into 2 integer pieces
2203 // and move it piece-by-piece.  Only happens when passing doubles into
2204 // C code as the Java calling convention forces doubles to be aligned.
2205 const bool Matcher::misaligned_doubles_ok = true;
2206 
// Never called on aarch64 (the previous "No-op on amd64" comment was
// copied from the x86 file); any call hits Unimplemented().
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2211 
2212 // Advertise here if the CPU requires explicit rounding operations to
2213 // implement the UseStrictFP mode.
2214 const bool Matcher::strict_fp_requires_explicit_rounding = false;
2215 
2216 // Are floats converted to double when stored to stack during
2217 // deoptimization?
2218 bool Matcher::float_in_double() { return false; }
2219 
2220 // Do ints take an entire long register or just half?
2221 // The relevant question is how the int is callee-saved:
2222 // the whole long is written but de-opt'ing will have to extract
2223 // the relevant 32 bits.
2224 const bool Matcher::int_in_long = true;
2225 
2226 // Return whether or not this register is ever used as an argument.
2227 // This function is used on startup to build the trampoline stubs in
2228 // generateOptoStub.  Registers not mentioned will be killed by the VM
2229 // call in the trampoline, and arguments in those registers not be
2230 // available to the callee.
2231 bool Matcher::can_be_java_arg(int reg)
2232 {
2233   return
2234     reg ==  R0_num || reg == R0_H_num ||
2235     reg ==  R1_num || reg == R1_H_num ||
2236     reg ==  R2_num || reg == R2_H_num ||
2237     reg ==  R3_num || reg == R3_H_num ||
2238     reg ==  R4_num || reg == R4_H_num ||
2239     reg ==  R5_num || reg == R5_H_num ||
2240     reg ==  R6_num || reg == R6_H_num ||
2241     reg ==  R7_num || reg == R7_H_num ||
2242     reg ==  V0_num || reg == V0_H_num ||
2243     reg ==  V1_num || reg == V1_H_num ||
2244     reg ==  V2_num || reg == V2_H_num ||
2245     reg ==  V3_num || reg == V3_H_num ||
2246     reg ==  V4_num || reg == V4_H_num ||
2247     reg ==  V5_num || reg == V5_H_num ||
2248     reg ==  V6_num || reg == V6_H_num ||
2249     reg ==  V7_num || reg == V7_H_num;
2250 }
2251 
// Any register that can carry an incoming Java argument may also be
// used to spill one.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never use inline assembly for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2260 
// Register for DIVI projection of divmodI.
// NOTE(review): all four divmod projection masks hit
// ShouldNotReachHere(), presumably because no divmod match rules are
// defined for this target — confirm against the instruction section.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Method-handle invokes save/restore via the frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2287 
2288 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2289   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2290     Node* u = addp->fast_out(i);
2291     if (u->is_Mem()) {
2292       int opsize = u->as_Mem()->memory_size();
2293       assert(opsize > 0, "unexpected memory operand size");
2294       if (u->as_Mem()->memory_size() != (1<<shift)) {
2295         return false;
2296       }
2297     }
2298   }
2299   return true;
2300 }
2301 
2302 const bool Matcher::convi2l_type_required = false;
2303 
2304 // Should the Matcher clone shifts on addressing modes, expecting them
2305 // to be subsumed into complex addressing expressions or compute them
2306 // into registers?
// Decide whether the address expression feeding AddP node m should be
// cloned into each use (so it can be subsumed into a complex
// addressing mode) rather than computed once into a register.
// Returns true when the expression was flagged for cloning.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base + constant offset: always cloneable.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (ConvI2L x) << con and every memory user
  // accesses 1<<con bytes, so a scaled-index mode fits all uses.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare ConvI2L; sign-extended index mode.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2344 
// Intentionally a no-op on aarch64.
void Compile::reshape_address(AddPNode* addp) {
}
2347 
2348 // helper for encoding java_to_runtime calls on sim
2349 //
2350 // this is needed to compute the extra arguments required when
2351 // planting a call to the simulator blrt instruction. the TypeFunc
2352 // can be queried to identify the counts for integral, and floating
2353 // arguments and the return type
2354 
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count the argument slots in the signature.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): there is no break here, so float/double slots
      // fall through and are counted in gps as well — confirm this
      // matches the simulator blrt convention before changing it.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type for the simulator's return handling.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // Any other (integral/pointer) return value.
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2389 
// Emit a volatile load/store.  Only a bare [base] address is legal for
// the acquire/release forms, so every other address component must be
// absent.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore helpers below:
// integer, FP and SIMD/vector forms of a memory instruction.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2403 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // 32-bit index: sign-extend (sxtw) before scaling.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: plain base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2434 
2435   static void loadStore(MacroAssembler masm, mem_float_insn insn,
2436                          FloatRegister reg, int opcode,
2437                          Register base, int index, int size, int disp)
2438   {
2439     Address::extend scale;
2440 
2441     switch (opcode) {
2442     case INDINDEXSCALEDI2L:
2443     case INDINDEXSCALEDI2LN:
2444       scale = Address::sxtw(size);
2445       break;
2446     default:
2447       scale = Address::lsl(size);
2448     }
2449 
2450      if (index == -1) {
2451       (masm.*insn)(reg, Address(base, disp));
2452     } else {
2453       assert(disp == 0, "unsupported address mode: disp = %d", disp);
2454       (masm.*insn)(reg, Address(base, as_Register(index), scale));
2455     }
2456   }
2457 
2458   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2459                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2460                          int opcode, Register base, int index, int size, int disp)
2461   {
2462     if (index == -1) {
2463       (masm.*insn)(reg, T, Address(base, disp));
2464     } else {
2465       assert(disp == 0, "unsupported address mode");
2466       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2467     }
2468   }
2469 
2470 %}
2471 
2472 
2473 
2474 //----------ENCODING BLOCK-----------------------------------------------------
2475 // This block specifies the encoding classes used by the compiler to
2476 // output byte streams.  Encoding classes are parameterized macros
2477 // used by Machine Instruction Nodes in order to generate the bit
2478 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
2481 // COND_INTER.  REG_INTER causes an operand to generate a function
2482 // which returns its register number when queried.  CONST_INTER causes
2483 // an operand to generate a function which returns the value of the
2484 // constant when queried.  MEMORY_INTER causes an operand to generate
2485 // four functions which return the Base Register, the Index Register,
2486 // the Scale Value, and the Offset Value of the operand when queried.
2487 // COND_INTER causes an operand to generate six functions which return
2488 // the encoding code (ie - encoding bits for the instruction)
2489 // associated with each basic boolean condition for a conditional
2490 // instruction.
2491 //
2492 // Instructions specify two basic values for encoding.  Again, a
2493 // function is available to check if the constant displacement is an
2494 // oop. They use the ins_encode keyword to specify their encoding
2495 // classes (which must be a sequence of enc_class names, and their
2496 // parameters, specified in the encoding block), and they use the
2497 // opcode keyword to specify, in order, their primary, secondary, and
2498 // tertiary opcode.  Only the opcode sections which a particular
2499 // instruction needs for encoding need to be specified.
2500 encode %{
2501   // Build emit functions for each basic byte or larger field in the
2502   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2503   // from C++ code in the enc_class source block.  Emit functions will
2504   // live in the main source block for now.  In future, we can
2505   // generalize this by adding a syntax that specifies the sizes of
2506   // fields in an order, so that the adlc can build the emit functions
2507   // automagically
2508 
  // catch all for unimplemented encodings: emits a trap so missing
  // encodings fail loudly at runtime rather than silently.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2514 
2515   // BEGIN Non-volatile memory access
2516 
2517   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
2518     Register dst_reg = as_Register($dst$$reg);
2519     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
2520                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2521   %}
2522 
2523   enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
2524     Register dst_reg = as_Register($dst$$reg);
2525     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
2526                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2527   %}
2528 
2529   enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
2530     Register dst_reg = as_Register($dst$$reg);
2531     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
2532                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2533   %}
2534 
2535   enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
2536     Register dst_reg = as_Register($dst$$reg);
2537     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
2538                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2539   %}
2540 
2541   enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
2542     Register dst_reg = as_Register($dst$$reg);
2543     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
2544                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2545   %}
2546 
2547   enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
2548     Register dst_reg = as_Register($dst$$reg);
2549     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
2550                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2551   %}
2552 
  // Non-volatile loads.  Each encoding resolves the ADL memory operand
  // (base register, index, scale, displacement) through loadStore(), which
  // selects the addressing form based on the memory node's opcode.

  // 16-bit load into a 32-bit register.
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 16-bit load consumed as a long.
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit load.
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit load consumed as a long.
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit load sign-extended to 64 bits (ldrsw).
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit load.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Single-precision FP load.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Double-precision FP load.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: MacroAssembler::S/D/Q select the operand size.

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2618 
  // Non-volatile stores.  The *0 variants store zr, i.e. the constant zero,
  // avoiding the need to materialize 0 in a register.

  // 8-bit store.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 8-bit store of zero.
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 8-bit store of zero, preceded by a StoreStore barrier so earlier
  // stores are ordered before this one.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 16-bit store.
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 16-bit store of zero.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit store.
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit store of zero.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2661 
  // 64-bit store.
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      // Only the store into the thread register is expected to hit this
      // case; copy sp through rscratch2 and store that instead.
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2675 
  // 64-bit store of zero.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Single-precision FP store.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Double-precision FP store.
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: MacroAssembler::S/D/Q select the operand size.

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2711 
2712   // END Non-volatile memory access
2713 
2714   // volatile loads and stores
2715 
  // Release stores (stlrb/stlrh/stlrw).  MOV_VOLATILE is a macro defined
  // earlier in this file; it forms the address (using rscratch1 as scratch)
  // and emits the named instruction -- NOTE(review): confirm against the
  // macro definition, which is outside this chunk.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
2730 
2731 
  // Acquiring loads (ldar family).  There is no sign-extending load-acquire,
  // so the signed variants emit ldarb/ldarh followed by an explicit
  // sign-extension.  Note that __ is usable after MOV_VOLATILE, so the
  // macro evidently brings _masm into scope -- NOTE(review): confirm.

  // byte load-acquire, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // byte load-acquire, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // unsigned byte load-acquire.
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // halfword load-acquire, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // halfword load-acquire, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // unsigned halfword load-acquire.
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // 32-bit load-acquire.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // 64-bit load-acquire.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP load-acquire: go through an integer scratch register because
  // ldar targets general-purpose registers, then fmov into the FP reg.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2806 
2807   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2808     Register src_reg = as_Register($src$$reg);
2809     // we sometimes get asked to store the stack pointer into the
2810     // current thread -- we cannot do that directly on AArch64
2811     if (src_reg == r31_sp) {
2812         MacroAssembler _masm(&cbuf);
2813       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2814       __ mov(rscratch2, sp);
2815       src_reg = rscratch2;
2816     }
2817     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2818                  rscratch1, stlr);
2819   %}
2820 
  // FP release stores: stlr targets general-purpose registers only, so
  // fmov the FP value into rscratch2 first.  The inner scope limits the
  // lifetime of this _masm since MOV_VOLATILE manages its own.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2840 
2841   // synchronized read/update encodings
2842 
  // Load-acquire-exclusive.  ldaxr only takes a plain base register, so any
  // index/displacement is folded into rscratch1 with lea first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       // No index register: base + displacement.
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      // Indexed: fold base, scaled index and displacement into rscratch1.
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
2871 
  // Store-release-exclusive.  Same address folding as aarch64_enc_ldaxr,
  // using rscratch2 for the address because rscratch1 receives the
  // store-exclusive status result.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Set condition flags from the status word: EQ <=> store succeeded
    // (stlxr writes 0 to rscratch1 on success).
    __ cmpw(rscratch1, zr);
  %}
2901 
  // Compare-and-swap encodings (plain, non-acquiring form).  Only a bare
  // base-register address is supported, hence the guarantee.  The operand
  // size varies: xword / word / halfword / byte.

  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2933 
2934 
2935   // The only difference between aarch64_enc_cmpxchg and
2936   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
2937   // CompareAndSwap sequence to serve as a barrier on acquiring a
2938   // lock.
  // Acquiring CAS variants: identical to the plain forms above except
  // /*acquire*/ true, so the load half of the CAS has acquire semantics.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2970 
2971   // auxiliary used for CompareAndSwapX to set result register
2972   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
2973     MacroAssembler _masm(&cbuf);
2974     Register res_reg = as_Register($res$$reg);
2975     __ cset(res_reg, Assembler::EQ);
2976   %}
2977 
2978   // prefetch encodings
2979 
  // Prefetch for store (PSTL1KEEP).  prfm supports base+disp and
  // base+scaled-index forms directly; the combined form needs a lea.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // Both index and displacement: fold the displacement first.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
2998 
  /// mov encodings
3000 
  // 32-bit immediate move; zero gets the register form (mov from zr).
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; zero gets the register form (mov from zr).
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3022 
  // Pointer-constant move.  Oop and metadata constants are emitted with
  // relocations so the GC/class unloading machinery can patch them; plain
  // addresses use a direct mov when below the VM page size, otherwise a
  // page-relative adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    // NULL and (address)1 are handled by dedicated encodings (mov_p0/mov_p1).
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3047 
  // Pointer constant 0 (NULL).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Load the polling page address page-relative; the page is assumed to be
  // page-aligned so the adrp low offset must be zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card-table byte map base (GC barrier support).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
3073 
  // Narrow-oop constant: emitted via set_narrow_oop so it carries an oop
  // relocation.  NULL is handled by aarch64_enc_mov_n0 below.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow-oop constant 0.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow-klass constant: emitted with a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3105 
3106   // arithmetic encodings
3107 
  // Add/subtract immediate, shared between the add and subtract rules via
  // the instruct's $primary selector.  The immIAddSub/immLAddSub operands
  // presumably restrict the constant to the add/sub immediate range --
  // NOTE(review): confirm against the operand definitions.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      // Negative immediates are encoded as a subtract of the magnitude.
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3135 
3136   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3137     MacroAssembler _masm(&cbuf);
3138    Register dst_reg = as_Register($dst$$reg);
3139    Register src1_reg = as_Register($src1$$reg);
3140    Register src2_reg = as_Register($src2$$reg);
3141     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3142   %}
3143 
3144   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3145     MacroAssembler _masm(&cbuf);
3146    Register dst_reg = as_Register($dst$$reg);
3147    Register src1_reg = as_Register($src1$$reg);
3148    Register src2_reg = as_Register($src2$$reg);
3149     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3150   %}
3151 
3152   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3153     MacroAssembler _masm(&cbuf);
3154    Register dst_reg = as_Register($dst$$reg);
3155    Register src1_reg = as_Register($src1$$reg);
3156    Register src2_reg = as_Register($src2$$reg);
3157     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3158   %}
3159 
3160   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3161     MacroAssembler _masm(&cbuf);
3162    Register dst_reg = as_Register($dst$$reg);
3163    Register src1_reg = as_Register($src1$$reg);
3164    Register src2_reg = as_Register($src2$$reg);
3165     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3166   %}
3167 
3168   // compare instruction encodings
3169 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate: encoded as a
  // flags-only subtract (or add, for negative immediates) into zr.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
3195 
  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit-range immediate, as a flags-only
  // subtract/add into zr.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case: val == -val, so negation
    // would not produce a usable magnitude; build it with orr instead.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
3225 
  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test: compare against zr.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop null test: 32-bit compare against zr.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3251 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-comparison variant; cmpOpU presumably maps Java comparisons
  // to the unsigned condition codes -- the emitted branch is identical.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3269 
  // Slow-path subtype check (supertype-list scan).  On a hit, result is
  // zeroed when $primary is set; on a miss, control reaches the miss label
  // with result left as set by check_klass_subtype_slow_path.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3287 
  // Static/opt-virtual Java call.  Calls go through a trampoline so the
  // target can be out of branch range; resolved Java calls additionally
  // need a to-interpreter stub.  Both the call and the stub emission can
  // fail when the code cache is full, which is reported as a bailout.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3314 
  // Virtual/interface Java call via an inline cache; can fail with a
  // code-cache-full bailout like the static call above.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call code; only emits a (currently unimplemented) stack-depth
  // check under VerifyStackAtCalls.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3332 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: trampoline call, with the usual
      // code-cache-full bailout.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache (native runtime): use blrt with
      // argument counts/type from the TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb slot pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3363 
3364   enc_class aarch64_enc_rethrow() %{
3365     MacroAssembler _masm(&cbuf);
3366     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
3367   %}
3368 
  // Method return: branch to the address held in the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3373 
  // Tail call: plain indirect branch (no link), so control transfers to
  // jump_target without creating a new return address.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3379 
  // Tail jump used when forwarding an exception: passes the saved return
  // address to the callee in r3 (see register notes below).
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3389 
  // Fast-path monitor enter for C2.  On exit the condition flags encode the
  // result: EQ = lock acquired (or recursive), NE = must call the runtime.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      // May fall through to the CAS below, or branch to cont on success.
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    // we can use AArch64's bit test and branch here but
    // markOopDesc does not define a bit index just the bit value
    // so assert in case the bit pos changes
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single compare-and-swap with acquire/release semantics.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC path: ldaxr/stlxr retry loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      __ cmp(rscratch1, disp_hdr);
    } else {
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3535 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  // Fast-path monitor exit for C2.  On exit the condition flags encode the
  // result: EQ = unlocked, NE = must call the runtime.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    // tmp is loaded with the current mark word for use on the monitor path.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    // NOTE(review): the monitor bit is tested on disp_hdr (the displaced
    // header saved in the box), not on tmp (the current mark word).  If the
    // lock was inflated after being stack-locked this test falls through and
    // the CAS below fails, which still reaches the slow path via cas_failed
    // -- confirm this is the intended (correct but slower) behaviour.
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr); // Sets flags for the cont consumer.
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero
    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3624 
3625 %}
3626 
3627 //----------FRAME--------------------------------------------------------------
3628 // Definition of frame structure and management information.
3629 //
3630 //  S T A C K   L A Y O U T    Allocators stack-slot number
3631 //                             |   (to get allocators register number
3632 //  G  Owned by    |        |  v    add OptoReg::stack0())
3633 //  r   CALLER     |        |
3634 //  o     |        +--------+      pad to even-align allocators stack-slot
3635 //  w     V        |  pad0  |        numbers; owned by CALLER
3636 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3637 //  h     ^        |   in   |  5
3638 //        |        |  args  |  4   Holes in incoming args owned by SELF
3639 //  |     |        |        |  3
3640 //  |     |        +--------+
3641 //  V     |        | old out|      Empty on Intel, window on Sparc
3642 //        |    old |preserve|      Must be even aligned.
3643 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3644 //        |        |   in   |  3   area for Intel ret address
3645 //     Owned by    |preserve|      Empty on Sparc.
3646 //       SELF      +--------+
3647 //        |        |  pad2  |  2   pad to align old SP
3648 //        |        +--------+  1
3649 //        |        | locks  |  0
3650 //        |        +--------+----> OptoReg::stack0(), even aligned
3651 //        |        |  pad1  | 11   pad to align new SP
3652 //        |        +--------+
3653 //        |        |        | 10
3654 //        |        | spills |  9   spills
3655 //        V        |        |  8   (pad0 slot for callee)
3656 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3657 //        ^        |  out   |  7
3658 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3659 //     Owned by    +--------+
3660 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3661 //        |    new |preserve|      Must be even-aligned.
3662 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3663 //        |        |        |
3664 //
3665 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3666 //         known from SELF's arguments and the Java calling convention.
3667 //         Region 6-7 is determined per call site.
3668 // Note 2: If the calling convention leaves holes in the incoming argument
3669 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3671 //         incoming area, as the Java calling convention is completely under
3672 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3674 //         varargs C calling conventions.
3675 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3676 //         even aligned with pad0 as needed.
3677 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3678 //           (the latter is true on Intel but is it false on AArch64?)
3679 //         region 6-11 is even aligned; it may be padded out more so that
3680 //         the region from SP to FP meets the minimum stack alignment.
3681 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3682 //         alignment.  Region 11, pad1, may be dynamically extended so that
3683 //         SP meets the minimum alignment.
3684 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 is SP in the AArch64 register numbering, so compiled
  // frames are addressed off SP while R29 (below) is the interpreter's
  // frame pointer -- confirm against the register definition block above.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Integer/pointer results are returned in R0 (with R0_H for the upper
    // half of 64-bit values); float/double results in V0 (with V0_H).
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3788 
3789 //----------ATTRIBUTES---------------------------------------------------------
3790 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute (INSN_COST is the
                                // file's baseline per-instruction cost unit)
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3806 
3807 //----------OPERANDS-----------------------------------------------------------
3808 // Operand definitions must precede instruction definitions for correct parsing
3809 // in the ADLC because operands constitute user defined types which are used in
3810 // instruction definitions.
3811 
3812 //----------Simple Operands----------------------------------------------------
3813 
// Integer operands 32 bit
// Each operand below matches a ConI node whose value satisfies the
// predicate; CONST_INTER marks it as a constant for the matcher.
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3857 
// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Any 32 bit int <= 4 (note: the predicate imposes no lower bound)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Exact-value int constants; each operand name encodes the single
// accepted value.
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3988 
// Exact-value long constants; each operand name encodes the single
// accepted value.
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long mask of contiguous low-order one bits (i.e. 2^k - 1) with the top
// two bits clear: the predicate requires value+1 to be a power of two.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int mask of contiguous low-order one bits (i.e. 2^k - 1) with the top
// two bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4040 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long constant) -- for base plus immediate loads
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Sized variants: the second argument to offset_ok_for_immed() is
// presumably the log2 of the access size (4/8/16 bytes) -- TODO confirm
// against Address::offset_ok_for_immed().
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-constant counterparts of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4175 
// 32 bit integer valid for add sub immediate
// (validity is delegated to the Assembler helper, which encodes the
// AArch64 immediate rules)
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4197 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (matches the constant byte offset of last_Java_pc within the
// JavaThread's frame anchor)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4284 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// matches only the constant address of the VM's safepoint polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One (sentinel value)
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two (sentinel value)
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4366 
4367 // Float and Double operands
4368 // Double Immediate
4369 operand immD()
4370 %{
4371   match(ConD);
4372   op_cost(0);
4373   format %{ %}
4374   interface(CONST_INTER);
4375 %}
4376 
4377 // Double Immediate: +0.0d
4378 operand immD0()
4379 %{
4380   predicate(jlong_cast(n->getd()) == 0);
4381   match(ConD);
4382 
4383   op_cost(0);
4384   format %{ %}
4385   interface(CONST_INTER);
4386 %}
4387 
// Double Immediate: value encodable as an FMOV (immediate) operand.
4389 operand immDPacked()
4390 %{
4391   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
4392   match(ConD);
4393   op_cost(0);
4394   format %{ %}
4395   interface(CONST_INTER);
4396 %}
4397 
4398 // Float Immediate
4399 operand immF()
4400 %{
4401   match(ConF);
4402   op_cost(0);
4403   format %{ %}
4404   interface(CONST_INTER);
4405 %}
4406 
4407 // Float Immediate: +0.0f.
4408 operand immF0()
4409 %{
4410   predicate(jint_cast(n->getf()) == 0);
4411   match(ConF);
4412 
4413   op_cost(0);
4414   format %{ %}
4415   interface(CONST_INTER);
4416 %}
4417 
// Float Immediate: value encodable as an FMOV (immediate) operand.
4419 operand immFPacked()
4420 %{
4421   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
4422   match(ConF);
4423   op_cost(0);
4424   format %{ %}
4425   interface(CONST_INTER);
4426 %}
4427 
4428 // Narrow pointer operands
4429 // Narrow Pointer Immediate
4430 operand immN()
4431 %{
4432   match(ConN);
4433 
4434   op_cost(0);
4435   format %{ %}
4436   interface(CONST_INTER);
4437 %}
4438 
4439 // Narrow NULL Pointer Immediate
4440 operand immN0()
4441 %{
4442   predicate(n->get_narrowcon() == 0);
4443   match(ConN);
4444 
4445   op_cost(0);
4446   format %{ %}
4447   interface(CONST_INTER);
4448 %}
4449 
// Narrow Klass Pointer Immediate
// (compressed Klass* constant, matched from a ConNKlass ideal node)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4458 
4459 // Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
4461 operand iRegI()
4462 %{
4463   constraint(ALLOC_IN_RC(any_reg32));
4464   match(RegI);
4465   match(iRegINoSp);
4466   op_cost(0);
4467   format %{ %}
4468   interface(REG_INTER);
4469 %}
4470 
4471 // Integer 32 bit Register not Special
4472 operand iRegINoSp()
4473 %{
4474   constraint(ALLOC_IN_RC(no_special_reg32));
4475   match(RegI);
4476   op_cost(0);
4477   format %{ %}
4478   interface(REG_INTER);
4479 %}
4480 
4481 // Integer 64 bit Register Operands
4482 // Integer 64 bit Register (includes SP)
4483 operand iRegL()
4484 %{
4485   constraint(ALLOC_IN_RC(any_reg));
4486   match(RegL);
4487   match(iRegLNoSp);
4488   op_cost(0);
4489   format %{ %}
4490   interface(REG_INTER);
4491 %}
4492 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // op_cost(0) added for consistency: every sibling register operand
  // (iRegINoSp, iRegPNoSp, iRegL, ...) declares an explicit zero cost.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4502 
4503 // Pointer Register Operands
4504 // Pointer Register
4505 operand iRegP()
4506 %{
4507   constraint(ALLOC_IN_RC(ptr_reg));
4508   match(RegP);
4509   match(iRegPNoSp);
4510   match(iRegP_R0);
4511   //match(iRegP_R2);
4512   //match(iRegP_R4);
4513   //match(iRegP_R5);
4514   match(thread_RegP);
4515   op_cost(0);
4516   format %{ %}
4517   interface(REG_INTER);
4518 %}
4519 
4520 // Pointer 64 bit Register not Special
4521 operand iRegPNoSp()
4522 %{
4523   constraint(ALLOC_IN_RC(no_special_ptr_reg));
4524   match(RegP);
4525   // match(iRegP);
4526   // match(iRegP_R0);
4527   // match(iRegP_R2);
4528   // match(iRegP_R4);
4529   // match(iRegP_R5);
4530   // match(thread_RegP);
4531   op_cost(0);
4532   format %{ %}
4533   interface(REG_INTER);
4534 %}
4535 
4536 // Pointer 64 bit Register R0 only
4537 operand iRegP_R0()
4538 %{
4539   constraint(ALLOC_IN_RC(r0_reg));
4540   match(RegP);
4541   // match(iRegP);
4542   match(iRegPNoSp);
4543   op_cost(0);
4544   format %{ %}
4545   interface(REG_INTER);
4546 %}
4547 
4548 // Pointer 64 bit Register R1 only
4549 operand iRegP_R1()
4550 %{
4551   constraint(ALLOC_IN_RC(r1_reg));
4552   match(RegP);
4553   // match(iRegP);
4554   match(iRegPNoSp);
4555   op_cost(0);
4556   format %{ %}
4557   interface(REG_INTER);
4558 %}
4559 
4560 // Pointer 64 bit Register R2 only
4561 operand iRegP_R2()
4562 %{
4563   constraint(ALLOC_IN_RC(r2_reg));
4564   match(RegP);
4565   // match(iRegP);
4566   match(iRegPNoSp);
4567   op_cost(0);
4568   format %{ %}
4569   interface(REG_INTER);
4570 %}
4571 
4572 // Pointer 64 bit Register R3 only
4573 operand iRegP_R3()
4574 %{
4575   constraint(ALLOC_IN_RC(r3_reg));
4576   match(RegP);
4577   // match(iRegP);
4578   match(iRegPNoSp);
4579   op_cost(0);
4580   format %{ %}
4581   interface(REG_INTER);
4582 %}
4583 
4584 // Pointer 64 bit Register R4 only
4585 operand iRegP_R4()
4586 %{
4587   constraint(ALLOC_IN_RC(r4_reg));
4588   match(RegP);
4589   // match(iRegP);
4590   match(iRegPNoSp);
4591   op_cost(0);
4592   format %{ %}
4593   interface(REG_INTER);
4594 %}
4595 
4596 // Pointer 64 bit Register R5 only
4597 operand iRegP_R5()
4598 %{
4599   constraint(ALLOC_IN_RC(r5_reg));
4600   match(RegP);
4601   // match(iRegP);
4602   match(iRegPNoSp);
4603   op_cost(0);
4604   format %{ %}
4605   interface(REG_INTER);
4606 %}
4607 
4608 // Pointer 64 bit Register R10 only
4609 operand iRegP_R10()
4610 %{
4611   constraint(ALLOC_IN_RC(r10_reg));
4612   match(RegP);
4613   // match(iRegP);
4614   match(iRegPNoSp);
4615   op_cost(0);
4616   format %{ %}
4617   interface(REG_INTER);
4618 %}
4619 
4620 // Long 64 bit Register R0 only
4621 operand iRegL_R0()
4622 %{
4623   constraint(ALLOC_IN_RC(r0_reg));
4624   match(RegL);
4625   match(iRegLNoSp);
4626   op_cost(0);
4627   format %{ %}
4628   interface(REG_INTER);
4629 %}
4630 
4631 // Long 64 bit Register R2 only
4632 operand iRegL_R2()
4633 %{
4634   constraint(ALLOC_IN_RC(r2_reg));
4635   match(RegL);
4636   match(iRegLNoSp);
4637   op_cost(0);
4638   format %{ %}
4639   interface(REG_INTER);
4640 %}
4641 
4642 // Long 64 bit Register R3 only
4643 operand iRegL_R3()
4644 %{
4645   constraint(ALLOC_IN_RC(r3_reg));
4646   match(RegL);
4647   match(iRegLNoSp);
4648   op_cost(0);
4649   format %{ %}
4650   interface(REG_INTER);
4651 %}
4652 
4653 // Long 64 bit Register R11 only
4654 operand iRegL_R11()
4655 %{
4656   constraint(ALLOC_IN_RC(r11_reg));
4657   match(RegL);
4658   match(iRegLNoSp);
4659   op_cost(0);
4660   format %{ %}
4661   interface(REG_INTER);
4662 %}
4663 
4664 // Pointer 64 bit Register FP only
4665 operand iRegP_FP()
4666 %{
4667   constraint(ALLOC_IN_RC(fp_reg));
4668   match(RegP);
4669   // match(iRegP);
4670   op_cost(0);
4671   format %{ %}
4672   interface(REG_INTER);
4673 %}
4674 
4675 // Register R0 only
4676 operand iRegI_R0()
4677 %{
4678   constraint(ALLOC_IN_RC(int_r0_reg));
4679   match(RegI);
4680   match(iRegINoSp);
4681   op_cost(0);
4682   format %{ %}
4683   interface(REG_INTER);
4684 %}
4685 
4686 // Register R2 only
4687 operand iRegI_R2()
4688 %{
4689   constraint(ALLOC_IN_RC(int_r2_reg));
4690   match(RegI);
4691   match(iRegINoSp);
4692   op_cost(0);
4693   format %{ %}
4694   interface(REG_INTER);
4695 %}
4696 
4697 // Register R3 only
4698 operand iRegI_R3()
4699 %{
4700   constraint(ALLOC_IN_RC(int_r3_reg));
4701   match(RegI);
4702   match(iRegINoSp);
4703   op_cost(0);
4704   format %{ %}
4705   interface(REG_INTER);
4706 %}
4707 
4708 
4709 // Register R4 only
4710 operand iRegI_R4()
4711 %{
4712   constraint(ALLOC_IN_RC(int_r4_reg));
4713   match(RegI);
4714   match(iRegINoSp);
4715   op_cost(0);
4716   format %{ %}
4717   interface(REG_INTER);
4718 %}
4719 
4720 
4721 // Pointer Register Operands
4722 // Narrow Pointer Register
4723 operand iRegN()
4724 %{
4725   constraint(ALLOC_IN_RC(any_reg32));
4726   match(RegN);
4727   match(iRegNNoSp);
4728   op_cost(0);
4729   format %{ %}
4730   interface(REG_INTER);
4731 %}
4732 
// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4759 
// Narrow Pointer (32 bit) Register not Special
4761 operand iRegNNoSp()
4762 %{
4763   constraint(ALLOC_IN_RC(no_special_reg32));
4764   match(RegN);
4765   op_cost(0);
4766   format %{ %}
4767   interface(REG_INTER);
4768 %}
4769 
4770 // heap base register -- used for encoding immN0
4771 
4772 operand iRegIHeapbase()
4773 %{
4774   constraint(ALLOC_IN_RC(heapbase_reg));
4775   match(RegI);
4776   op_cost(0);
4777   format %{ %}
4778   interface(REG_INTER);
4779 %}
4780 
4781 // Float Register
4782 // Float register operands
4783 operand vRegF()
4784 %{
4785   constraint(ALLOC_IN_RC(float_reg));
4786   match(RegF);
4787 
4788   op_cost(0);
4789   format %{ %}
4790   interface(REG_INTER);
4791 %}
4792 
4793 // Double Register
4794 // Double register operands
4795 operand vRegD()
4796 %{
4797   constraint(ALLOC_IN_RC(double_reg));
4798   match(RegD);
4799 
4800   op_cost(0);
4801   format %{ %}
4802   interface(REG_INTER);
4803 %}
4804 
// Vector Register Operands
// 64 bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit (X/Q-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4824 
// Double Register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4860 
4861 // Flags register, used as output of signed compare instructions
4862 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
4865 // that ordered inequality tests use GT, GE, LT or LE none of which
4866 // pass through cases where the result is unordered i.e. one or both
4867 // inputs to the compare is a NaN. this means that the ideal code can
4868 // replace e.g. a GT with an LE and not end up capturing the NaN case
4869 // (where the comparison should always fail). EQ and NE tests are
4870 // always generated in ideal code so that unordered folds into the NE
4871 // case, matching the behaviour of AArch64 NE.
4872 //
4873 // This differs from x86 where the outputs of FP compares use a
4874 // special FP flags registers and where compares based on this
4875 // register are distinguished into ordered inequalities (cmpOpUCF) and
4876 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
4877 // to explicitly handle the unordered case in branches. x86 also has
4878 // to include extra CMoveX rules to accept a cmpOpUCF input.
4879 
4880 operand rFlagsReg()
4881 %{
4882   constraint(ALLOC_IN_RC(int_flags));
4883   match(RegFlags);
4884 
4885   op_cost(0);
4886   format %{ "RFLAGS" %}
4887   interface(REG_INTER);
4888 %}
4889 
4890 // Flags register, used as output of unsigned compare instructions
4891 operand rFlagsRegU()
4892 %{
4893   constraint(ALLOC_IN_RC(int_flags));
4894   match(RegFlags);
4895 
4896   op_cost(0);
4897   format %{ "RFLAGSU" %}
4898   interface(REG_INTER);
4899 %}
4900 
4901 // Special Registers
4902 
4903 // Method Register
4904 operand inline_cache_RegP(iRegP reg)
4905 %{
4906   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
4907   match(reg);
4908   match(iRegPNoSp);
4909   op_cost(0);
4910   format %{ %}
4911   interface(REG_INTER);
4912 %}
4913 
4914 operand interpreter_method_oop_RegP(iRegP reg)
4915 %{
4916   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
4917   match(reg);
4918   match(iRegPNoSp);
4919   op_cost(0);
4920   format %{ %}
4921   interface(REG_INTER);
4922 %}
4923 
4924 // Thread Register
4925 operand thread_RegP(iRegP reg)
4926 %{
4927   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
4928   match(reg);
4929   op_cost(0);
4930   format %{ %}
4931   interface(REG_INTER);
4932 %}
4933 
4934 operand lr_RegP(iRegP reg)
4935 %{
4936   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
4937   match(reg);
4938   op_cost(0);
4939   format %{ %}
4940   interface(REG_INTER);
4941 %}
4942 
4943 //----------Memory Operands----------------------------------------------------
4944 
4945 operand indirect(iRegP reg)
4946 %{
4947   constraint(ALLOC_IN_RC(ptr_reg));
4948   match(reg);
4949   op_cost(0);
4950   format %{ "[$reg]" %}
4951   interface(MEMORY_INTER) %{
4952     base($reg);
4953     index(0xffffffff);
4954     scale(0x0);
4955     disp(0x0);
4956   %}
4957 %}
4958 
4959 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
4960 %{
4961   constraint(ALLOC_IN_RC(ptr_reg));
4962   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
4963   match(AddP reg (LShiftL (ConvI2L ireg) scale));
4964   op_cost(0);
4965   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
4966   interface(MEMORY_INTER) %{
4967     base($reg);
4968     index($ireg);
4969     scale($scale);
4970     disp(0x0);
4971   %}
4972 %}
4973 
4974 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
4975 %{
4976   constraint(ALLOC_IN_RC(ptr_reg));
4977   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
4978   match(AddP reg (LShiftL lreg scale));
4979   op_cost(0);
4980   format %{ "$reg, $lreg lsl($scale)" %}
4981   interface(MEMORY_INTER) %{
4982     base($reg);
4983     index($lreg);
4984     scale($scale);
4985     disp(0x0);
4986   %}
4987 %}
4988 
4989 operand indIndexI2L(iRegP reg, iRegI ireg)
4990 %{
4991   constraint(ALLOC_IN_RC(ptr_reg));
4992   match(AddP reg (ConvI2L ireg));
4993   op_cost(0);
4994   format %{ "$reg, $ireg, 0, I2L" %}
4995   interface(MEMORY_INTER) %{
4996     base($reg);
4997     index($ireg);
4998     scale(0x0);
4999     disp(0x0);
5000   %}
5001 %}
5002 
5003 operand indIndex(iRegP reg, iRegL lreg)
5004 %{
5005   constraint(ALLOC_IN_RC(ptr_reg));
5006   match(AddP reg lreg);
5007   op_cost(0);
5008   format %{ "$reg, $lreg" %}
5009   interface(MEMORY_INTER) %{
5010     base($reg);
5011     index($lreg);
5012     scale(0x0);
5013     disp(0x0);
5014   %}
5015 %}
5016 
5017 operand indOffI(iRegP reg, immIOffset off)
5018 %{
5019   constraint(ALLOC_IN_RC(ptr_reg));
5020   match(AddP reg off);
5021   op_cost(0);
5022   format %{ "[$reg, $off]" %}
5023   interface(MEMORY_INTER) %{
5024     base($reg);
5025     index(0xffffffff);
5026     scale(0x0);
5027     disp($off);
5028   %}
5029 %}
5030 
5031 operand indOffI4(iRegP reg, immIOffset4 off)
5032 %{
5033   constraint(ALLOC_IN_RC(ptr_reg));
5034   match(AddP reg off);
5035   op_cost(0);
5036   format %{ "[$reg, $off]" %}
5037   interface(MEMORY_INTER) %{
5038     base($reg);
5039     index(0xffffffff);
5040     scale(0x0);
5041     disp($off);
5042   %}
5043 %}
5044 
5045 operand indOffI8(iRegP reg, immIOffset8 off)
5046 %{
5047   constraint(ALLOC_IN_RC(ptr_reg));
5048   match(AddP reg off);
5049   op_cost(0);
5050   format %{ "[$reg, $off]" %}
5051   interface(MEMORY_INTER) %{
5052     base($reg);
5053     index(0xffffffff);
5054     scale(0x0);
5055     disp($off);
5056   %}
5057 %}
5058 
5059 operand indOffI16(iRegP reg, immIOffset16 off)
5060 %{
5061   constraint(ALLOC_IN_RC(ptr_reg));
5062   match(AddP reg off);
5063   op_cost(0);
5064   format %{ "[$reg, $off]" %}
5065   interface(MEMORY_INTER) %{
5066     base($reg);
5067     index(0xffffffff);
5068     scale(0x0);
5069     disp($off);
5070   %}
5071 %}
5072 
5073 operand indOffL(iRegP reg, immLoffset off)
5074 %{
5075   constraint(ALLOC_IN_RC(ptr_reg));
5076   match(AddP reg off);
5077   op_cost(0);
5078   format %{ "[$reg, $off]" %}
5079   interface(MEMORY_INTER) %{
5080     base($reg);
5081     index(0xffffffff);
5082     scale(0x0);
5083     disp($off);
5084   %}
5085 %}
5086 
5087 operand indOffL4(iRegP reg, immLoffset4 off)
5088 %{
5089   constraint(ALLOC_IN_RC(ptr_reg));
5090   match(AddP reg off);
5091   op_cost(0);
5092   format %{ "[$reg, $off]" %}
5093   interface(MEMORY_INTER) %{
5094     base($reg);
5095     index(0xffffffff);
5096     scale(0x0);
5097     disp($off);
5098   %}
5099 %}
5100 
5101 operand indOffL8(iRegP reg, immLoffset8 off)
5102 %{
5103   constraint(ALLOC_IN_RC(ptr_reg));
5104   match(AddP reg off);
5105   op_cost(0);
5106   format %{ "[$reg, $off]" %}
5107   interface(MEMORY_INTER) %{
5108     base($reg);
5109     index(0xffffffff);
5110     scale(0x0);
5111     disp($off);
5112   %}
5113 %}
5114 
5115 operand indOffL16(iRegP reg, immLoffset16 off)
5116 %{
5117   constraint(ALLOC_IN_RC(ptr_reg));
5118   match(AddP reg off);
5119   op_cost(0);
5120   format %{ "[$reg, $off]" %}
5121   interface(MEMORY_INTER) %{
5122     base($reg);
5123     index(0xffffffff);
5124     scale(0x0);
5125     disp($off);
5126   %}
5127 %}
5128 
5129 operand indirectN(iRegN reg)
5130 %{
5131   predicate(Universe::narrow_oop_shift() == 0);
5132   constraint(ALLOC_IN_RC(ptr_reg));
5133   match(DecodeN reg);
5134   op_cost(0);
5135   format %{ "[$reg]\t# narrow" %}
5136   interface(MEMORY_INTER) %{
5137     base($reg);
5138     index(0xffffffff);
5139     scale(0x0);
5140     disp(0x0);
5141   %}
5142 %}
5143 
5144 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
5145 %{
5146   predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
5147   constraint(ALLOC_IN_RC(ptr_reg));
5148   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
5149   op_cost(0);
5150   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
5151   interface(MEMORY_INTER) %{
5152     base($reg);
5153     index($ireg);
5154     scale($scale);
5155     disp(0x0);
5156   %}
5157 %}
5158 
5159 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
5160 %{
5161   predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
5162   constraint(ALLOC_IN_RC(ptr_reg));
5163   match(AddP (DecodeN reg) (LShiftL lreg scale));
5164   op_cost(0);
5165   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
5166   interface(MEMORY_INTER) %{
5167     base($reg);
5168     index($lreg);
5169     scale($scale);
5170     disp(0x0);
5171   %}
5172 %}
5173 
5174 operand indIndexI2LN(iRegN reg, iRegI ireg)
5175 %{
5176   predicate(Universe::narrow_oop_shift() == 0);
5177   constraint(ALLOC_IN_RC(ptr_reg));
5178   match(AddP (DecodeN reg) (ConvI2L ireg));
5179   op_cost(0);
5180   format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
5181   interface(MEMORY_INTER) %{
5182     base($reg);
5183     index($ireg);
5184     scale(0x0);
5185     disp(0x0);
5186   %}
5187 %}
5188 
5189 operand indIndexN(iRegN reg, iRegL lreg)
5190 %{
5191   predicate(Universe::narrow_oop_shift() == 0);
5192   constraint(ALLOC_IN_RC(ptr_reg));
5193   match(AddP (DecodeN reg) lreg);
5194   op_cost(0);
5195   format %{ "$reg, $lreg\t# narrow" %}
5196   interface(MEMORY_INTER) %{
5197     base($reg);
5198     index($lreg);
5199     scale(0x0);
5200     disp(0x0);
5201   %}
5202 %}
5203 
5204 operand indOffIN(iRegN reg, immIOffset off)
5205 %{
5206   predicate(Universe::narrow_oop_shift() == 0);
5207   constraint(ALLOC_IN_RC(ptr_reg));
5208   match(AddP (DecodeN reg) off);
5209   op_cost(0);
5210   format %{ "[$reg, $off]\t# narrow" %}
5211   interface(MEMORY_INTER) %{
5212     base($reg);
5213     index(0xffffffff);
5214     scale(0x0);
5215     disp($off);
5216   %}
5217 %}
5218 
5219 operand indOffLN(iRegN reg, immLoffset off)
5220 %{
5221   predicate(Universe::narrow_oop_shift() == 0);
5222   constraint(ALLOC_IN_RC(ptr_reg));
5223   match(AddP (DecodeN reg) off);
5224   op_cost(0);
5225   format %{ "[$reg, $off]\t# narrow" %}
5226   interface(MEMORY_INTER) %{
5227     base($reg);
5228     index(0xffffffff);
5229     scale(0x0);
5230     disp($off);
5231   %}
5232 %}
5233 
5234 
5235 
5236 // AArch64 opto stubs need to write to the pc slot in the thread anchor
5237 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
5238 %{
5239   constraint(ALLOC_IN_RC(ptr_reg));
5240   match(AddP reg off);
5241   op_cost(0);
5242   format %{ "[$reg, $off]" %}
5243   interface(MEMORY_INTER) %{
5244     base($reg);
5245     index(0xffffffff);
5246     scale(0x0);
5247     disp($off);
5248   %}
5249 %}
5250 
5251 //----------Special Memory Operands--------------------------------------------
5252 // Stack Slot Operand - This operand is used for loading and storing temporary
5253 //                      values on the stack where a match requires a value to
5254 //                      flow through memory.
5255 operand stackSlotP(sRegP reg)
5256 %{
5257   constraint(ALLOC_IN_RC(stack_slots));
5258   op_cost(100);
5259   // No match rule because this operand is only generated in matching
5260   // match(RegP);
5261   format %{ "[$reg]" %}
5262   interface(MEMORY_INTER) %{
5263     base(0x1e);  // RSP
5264     index(0x0);  // No Index
5265     scale(0x0);  // No Scale
5266     disp($reg);  // Stack Offset
5267   %}
5268 %}
5269 
5270 operand stackSlotI(sRegI reg)
5271 %{
5272   constraint(ALLOC_IN_RC(stack_slots));
5273   // No match rule because this operand is only generated in matching
5274   // match(RegI);
5275   format %{ "[$reg]" %}
5276   interface(MEMORY_INTER) %{
5277     base(0x1e);  // RSP
5278     index(0x0);  // No Index
5279     scale(0x0);  // No Scale
5280     disp($reg);  // Stack Offset
5281   %}
5282 %}
5283 
5284 operand stackSlotF(sRegF reg)
5285 %{
5286   constraint(ALLOC_IN_RC(stack_slots));
5287   // No match rule because this operand is only generated in matching
5288   // match(RegF);
5289   format %{ "[$reg]" %}
5290   interface(MEMORY_INTER) %{
5291     base(0x1e);  // RSP
5292     index(0x0);  // No Index
5293     scale(0x0);  // No Scale
5294     disp($reg);  // Stack Offset
5295   %}
5296 %}
5297 
5298 operand stackSlotD(sRegD reg)
5299 %{
5300   constraint(ALLOC_IN_RC(stack_slots));
5301   // No match rule because this operand is only generated in matching
5302   // match(RegD);
5303   format %{ "[$reg]" %}
5304   interface(MEMORY_INTER) %{
5305     base(0x1e);  // RSP
5306     index(0x0);  // No Index
5307     scale(0x0);  // No Scale
5308     disp($reg);  // Stack Offset
5309   %}
5310 %}
5311 
5312 operand stackSlotL(sRegL reg)
5313 %{
5314   constraint(ALLOC_IN_RC(stack_slots));
5315   // No match rule because this operand is only generated in matching
5316   // match(RegL);
5317   format %{ "[$reg]" %}
5318   interface(MEMORY_INTER) %{
5319     base(0x1e);  // RSP
5320     index(0x0);  // No Index
5321     scale(0x0);  // No Scale
5322     disp($reg);  // Stack Offset
5323   %}
5324 %}
5325 
5326 // Operands for expressing Control Flow
5327 // NOTE: Label is a predefined operand which should not be redefined in
5328 //       the AD file. It is generically handled within the ADLC.
5329 
5330 //----------Conditional Branch Operands----------------------------------------
5331 // Comparison Op  - This is the operation of the comparison, and is limited to
5332 //                  the following set of codes:
5333 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5334 //
5335 // Other attributes of the comparison, such as unsignedness, are specified
5336 // by the comparison instruction that sets a condition code flags register.
5337 // That result is represented by a flags operand whose subtype is appropriate
5338 // to the unsignedness (etc.) of the comparison.
5339 //
5340 // Later, the instruction which matches both the Comparison Op (a Bool) and
5341 // the flags (produced by the Cmp) specifies the coding of the comparison op
5342 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5343 
5344 // used for signed integral comparisons and fp comparisons
5345 
5346 operand cmpOp()
5347 %{
5348   match(Bool);
5349 
5350   format %{ "" %}
5351   interface(COND_INTER) %{
5352     equal(0x0, "eq");
5353     not_equal(0x1, "ne");
5354     less(0xb, "lt");
5355     greater_equal(0xa, "ge");
5356     less_equal(0xd, "le");
5357     greater(0xc, "gt");
5358     overflow(0x6, "vs");
5359     no_overflow(0x7, "vc");
5360   %}
5361 %}
5362 
5363 // used for unsigned integral comparisons
5364 
5365 operand cmpOpU()
5366 %{
5367   match(Bool);
5368 
5369   format %{ "" %}
5370   interface(COND_INTER) %{
5371     equal(0x0, "eq");
5372     not_equal(0x1, "ne");
5373     less(0x3, "lo");
5374     greater_equal(0x2, "hs");
5375     less_equal(0x9, "ls");
5376     greater(0x8, "hi");
5377     overflow(0x6, "vs");
5378     no_overflow(0x7, "vc");
5379   %}
5380 %}
5381 
5382 // used for certain integral comparisons which can be
5383 // converted to cbxx or tbxx instructions
5384 
5385 operand cmpOpEqNe()
5386 %{
5387   match(Bool);
5388   match(CmpOp);
5389   op_cost(0);
5390   predicate(n->as_Bool()->_test._test == BoolTest::ne
5391             || n->as_Bool()->_test._test == BoolTest::eq);
5392 
5393   format %{ "" %}
5394   interface(COND_INTER) %{
5395     equal(0x0, "eq");
5396     not_equal(0x1, "ne");
5397     less(0xb, "lt");
5398     greater_equal(0xa, "ge");
5399     less_equal(0xd, "le");
5400     greater(0xc, "gt");
5401     overflow(0x6, "vs");
5402     no_overflow(0x7, "vc");
5403   %}
5404 %}
5405 
5406 // used for certain integral comparisons which can be
5407 // converted to cbxx or tbxx instructions
5408 
5409 operand cmpOpLtGe()
5410 %{
5411   match(Bool);
5412   match(CmpOp);
5413   op_cost(0);
5414 
5415   predicate(n->as_Bool()->_test._test == BoolTest::lt
5416             || n->as_Bool()->_test._test == BoolTest::ge);
5417 
5418   format %{ "" %}
5419   interface(COND_INTER) %{
5420     equal(0x0, "eq");
5421     not_equal(0x1, "ne");
5422     less(0xb, "lt");
5423     greater_equal(0xa, "ge");
5424     less_equal(0xd, "le");
5425     greater(0xc, "gt");
5426     overflow(0x6, "vs");
5427     no_overflow(0x7, "vc");
5428   %}
5429 %}
5430 
5431 // used for certain unsigned integral comparisons which can be
5432 // converted to cbxx or tbxx instructions
5433 
5434 operand cmpOpUEqNeLtGe()
5435 %{
5436   match(Bool);
5437   match(CmpOp);
5438   op_cost(0);
5439 
5440   predicate(n->as_Bool()->_test._test == BoolTest::eq
5441             || n->as_Bool()->_test._test == BoolTest::ne
5442             || n->as_Bool()->_test._test == BoolTest::lt
5443             || n->as_Bool()->_test._test == BoolTest::ge);
5444 
5445   format %{ "" %}
5446   interface(COND_INTER) %{
5447     equal(0x0, "eq");
5448     not_equal(0x1, "ne");
5449     less(0xb, "lt");
5450     greater_equal(0xa, "ge");
5451     less_equal(0xd, "le");
5452     greater(0xc, "gt");
5453     overflow(0x6, "vs");
5454     no_overflow(0x7, "vc");
5455   %}
5456 %}
5457 
5458 // Special operand allowing long args to int ops to be truncated for free
5459 
5460 operand iRegL2I(iRegL reg) %{
5461 
5462   op_cost(0);
5463 
5464   match(ConvL2I reg);
5465 
5466   format %{ "l2i($reg)" %}
5467 
5468   interface(REG_INTER)
5469 %}
5470 
5471 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
5472 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
5473 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5474 
5475 //----------OPERAND CLASSES----------------------------------------------------
5476 // Operand Classes are groups of operands that are used as to simplify
5477 // instruction definitions by not requiring the AD writer to specify
5478 // separate instructions for every form of operand when the
5479 // instruction accepts multiple operand types with the same basic
5480 // encoding and format. The classic case of this is memory operands.
5481 
5482 // memory is used to define read/write location for load/store
5483 // instruction defs. we can turn a memory op into an Address
5484 
5485 opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
5486                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
5487 
5488 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5489 // operations. it allows the src to be either an iRegI or a (ConvL2I
5490 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5491 // can be elided because the 32-bit instruction will just employ the
5492 // lower 32 bits anyway.
5493 //
5494 // n.b. this does not elide all L2I conversions. if the truncated
5495 // value is consumed by more than one operation then the ConvL2I
5496 // cannot be bundled into the consuming nodes so an l2i gets planted
5497 // (actually a movw $dst $src) and the downstream instructions consume
5498 // the result of the l2i as an iRegI input. That's a shame since the
5499 // movw is actually redundant but its not too costly.
5500 
5501 opclass iRegIorL2I(iRegI, iRegL2I);
5502 
5503 //----------PIPELINE-----------------------------------------------------------
5504 // Rules which define the behavior of the target architectures pipeline.
5505 
5506 // For specific pipelines, eg A53, define the stages of that pipeline
5507 //pipe_desc(ISS, EX1, EX2, WR);
5508 #define ISS S0
5509 #define EX1 S1
5510 #define EX2 S2
5511 #define WR  S3
5512 
5513 // Integer ALU reg operation
5514 pipeline %{
5515 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5528 
5529 // We don't use an actual pipeline model so don't care about resources
5530 // or description. we do use pipeline classes to introduce fixed
5531 // latencies
5532 
5533 //----------RESOURCES----------------------------------------------------------
5534 // Resources are the functional units available to the machine
5535 
5536 resources( INS0, INS1, INS01 = INS0 | INS1,
5537            ALU0, ALU1, ALU = ALU0 | ALU1,
5538            MAC,
5539            DIV,
5540            BRANCH,
5541            LDST,
5542            NEON_FP);
5543 
5544 //----------PIPELINE DESCRIPTION-----------------------------------------------
5545 // Pipeline Description specifies the stages in the machine's pipeline
5546 
5547 // Define the pipeline as a generic 6 stage pipeline
5548 pipe_desc(S0, S1, S2, S3, S4, S5);
5549 
5550 //----------PIPELINE CLASSES---------------------------------------------------
5551 // Pipeline Classes describe the stages in which input and output are
5552 // referenced by the hardware pipeline.
5553 
5554 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
5555 %{
5556   single_instruction;
5557   src1   : S1(read);
5558   src2   : S2(read);
5559   dst    : S5(write);
5560   INS01  : ISS;
5561   NEON_FP : S5;
5562 %}
5563 
5564 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
5565 %{
5566   single_instruction;
5567   src1   : S1(read);
5568   src2   : S2(read);
5569   dst    : S5(write);
5570   INS01  : ISS;
5571   NEON_FP : S5;
5572 %}
5573 
5574 pipe_class fp_uop_s(vRegF dst, vRegF src)
5575 %{
5576   single_instruction;
5577   src    : S1(read);
5578   dst    : S5(write);
5579   INS01  : ISS;
5580   NEON_FP : S5;
5581 %}
5582 
5583 pipe_class fp_uop_d(vRegD dst, vRegD src)
5584 %{
5585   single_instruction;
5586   src    : S1(read);
5587   dst    : S5(write);
5588   INS01  : ISS;
5589   NEON_FP : S5;
5590 %}
5591 
5592 pipe_class fp_d2f(vRegF dst, vRegD src)
5593 %{
5594   single_instruction;
5595   src    : S1(read);
5596   dst    : S5(write);
5597   INS01  : ISS;
5598   NEON_FP : S5;
5599 %}
5600 
5601 pipe_class fp_f2d(vRegD dst, vRegF src)
5602 %{
5603   single_instruction;
5604   src    : S1(read);
5605   dst    : S5(write);
5606   INS01  : ISS;
5607   NEON_FP : S5;
5608 %}
5609 
5610 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
5611 %{
5612   single_instruction;
5613   src    : S1(read);
5614   dst    : S5(write);
5615   INS01  : ISS;
5616   NEON_FP : S5;
5617 %}
5618 
5619 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
5620 %{
5621   single_instruction;
5622   src    : S1(read);
5623   dst    : S5(write);
5624   INS01  : ISS;
5625   NEON_FP : S5;
5626 %}
5627 
5628 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
5629 %{
5630   single_instruction;
5631   src    : S1(read);
5632   dst    : S5(write);
5633   INS01  : ISS;
5634   NEON_FP : S5;
5635 %}
5636 
5637 pipe_class fp_l2f(vRegF dst, iRegL src)
5638 %{
5639   single_instruction;
5640   src    : S1(read);
5641   dst    : S5(write);
5642   INS01  : ISS;
5643   NEON_FP : S5;
5644 %}
5645 
5646 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
5647 %{
5648   single_instruction;
5649   src    : S1(read);
5650   dst    : S5(write);
5651   INS01  : ISS;
5652   NEON_FP : S5;
5653 %}
5654 
5655 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
5656 %{
5657   single_instruction;
5658   src    : S1(read);
5659   dst    : S5(write);
5660   INS01  : ISS;
5661   NEON_FP : S5;
5662 %}
5663 
5664 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
5665 %{
5666   single_instruction;
5667   src    : S1(read);
5668   dst    : S5(write);
5669   INS01  : ISS;
5670   NEON_FP : S5;
5671 %}
5672 
5673 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
5674 %{
5675   single_instruction;
5676   src    : S1(read);
5677   dst    : S5(write);
5678   INS01  : ISS;
5679   NEON_FP : S5;
5680 %}
5681 
5682 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
5683 %{
5684   single_instruction;
5685   src1   : S1(read);
5686   src2   : S2(read);
5687   dst    : S5(write);
5688   INS0   : ISS;
5689   NEON_FP : S5;
5690 %}
5691 
5692 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
5693 %{
5694   single_instruction;
5695   src1   : S1(read);
5696   src2   : S2(read);
5697   dst    : S5(write);
5698   INS0   : ISS;
5699   NEON_FP : S5;
5700 %}
5701 
5702 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
5703 %{
5704   single_instruction;
5705   cr     : S1(read);
5706   src1   : S1(read);
5707   src2   : S1(read);
5708   dst    : S3(write);
5709   INS01  : ISS;
5710   NEON_FP : S3;
5711 %}
5712 
5713 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
5714 %{
5715   single_instruction;
5716   cr     : S1(read);
5717   src1   : S1(read);
5718   src2   : S1(read);
5719   dst    : S3(write);
5720   INS01  : ISS;
5721   NEON_FP : S3;
5722 %}
5723 
5724 pipe_class fp_imm_s(vRegF dst)
5725 %{
5726   single_instruction;
5727   dst    : S3(write);
5728   INS01  : ISS;
5729   NEON_FP : S3;
5730 %}
5731 
5732 pipe_class fp_imm_d(vRegD dst)
5733 %{
5734   single_instruction;
5735   dst    : S3(write);
5736   INS01  : ISS;
5737   NEON_FP : S3;
5738 %}
5739 
5740 pipe_class fp_load_constant_s(vRegF dst)
5741 %{
5742   single_instruction;
5743   dst    : S4(write);
5744   INS01  : ISS;
5745   NEON_FP : S4;
5746 %}
5747 
5748 pipe_class fp_load_constant_d(vRegD dst)
5749 %{
5750   single_instruction;
5751   dst    : S4(write);
5752   INS01  : ISS;
5753   NEON_FP : S4;
5754 %}
5755 
5756 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
5757 %{
5758   single_instruction;
5759   dst    : S5(write);
5760   src1   : S1(read);
5761   src2   : S1(read);
5762   INS01  : ISS;
5763   NEON_FP : S5;
5764 %}
5765 
5766 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
5767 %{
5768   single_instruction;
5769   dst    : S5(write);
5770   src1   : S1(read);
5771   src2   : S1(read);
5772   INS0   : ISS;
5773   NEON_FP : S5;
5774 %}
5775 
5776 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
5777 %{
5778   single_instruction;
5779   dst    : S5(write);
5780   src1   : S1(read);
5781   src2   : S1(read);
5782   dst    : S1(read);
5783   INS01  : ISS;
5784   NEON_FP : S5;
5785 %}
5786 
5787 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
5788 %{
5789   single_instruction;
5790   dst    : S5(write);
5791   src1   : S1(read);
5792   src2   : S1(read);
5793   dst    : S1(read);
5794   INS0   : ISS;
5795   NEON_FP : S5;
5796 %}
5797 
5798 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
5799 %{
5800   single_instruction;
5801   dst    : S4(write);
5802   src1   : S2(read);
5803   src2   : S2(read);
5804   INS01  : ISS;
5805   NEON_FP : S4;
5806 %}
5807 
5808 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
5809 %{
5810   single_instruction;
5811   dst    : S4(write);
5812   src1   : S2(read);
5813   src2   : S2(read);
5814   INS0   : ISS;
5815   NEON_FP : S4;
5816 %}
5817 
5818 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
5819 %{
5820   single_instruction;
5821   dst    : S3(write);
5822   src1   : S2(read);
5823   src2   : S2(read);
5824   INS01  : ISS;
5825   NEON_FP : S3;
5826 %}
5827 
5828 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
5829 %{
5830   single_instruction;
5831   dst    : S3(write);
5832   src1   : S2(read);
5833   src2   : S2(read);
5834   INS0   : ISS;
5835   NEON_FP : S3;
5836 %}
5837 
5838 pipe_class vshift64(vecD dst, vecD src, vecX shift)
5839 %{
5840   single_instruction;
5841   dst    : S3(write);
5842   src    : S1(read);
5843   shift  : S1(read);
5844   INS01  : ISS;
5845   NEON_FP : S3;
5846 %}
5847 
5848 pipe_class vshift128(vecX dst, vecX src, vecX shift)
5849 %{
5850   single_instruction;
5851   dst    : S3(write);
5852   src    : S1(read);
5853   shift  : S1(read);
5854   INS0   : ISS;
5855   NEON_FP : S3;
5856 %}
5857 
5858 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
5859 %{
5860   single_instruction;
5861   dst    : S3(write);
5862   src    : S1(read);
5863   INS01  : ISS;
5864   NEON_FP : S3;
5865 %}
5866 
5867 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
5868 %{
5869   single_instruction;
5870   dst    : S3(write);
5871   src    : S1(read);
5872   INS0   : ISS;
5873   NEON_FP : S3;
5874 %}
5875 
5876 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
5877 %{
5878   single_instruction;
5879   dst    : S5(write);
5880   src1   : S1(read);
5881   src2   : S1(read);
5882   INS01  : ISS;
5883   NEON_FP : S5;
5884 %}
5885 
5886 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
5887 %{
5888   single_instruction;
5889   dst    : S5(write);
5890   src1   : S1(read);
5891   src2   : S1(read);
5892   INS0   : ISS;
5893   NEON_FP : S5;
5894 %}
5895 
5896 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
5897 %{
5898   single_instruction;
5899   dst    : S5(write);
5900   src1   : S1(read);
5901   src2   : S1(read);
5902   INS0   : ISS;
5903   NEON_FP : S5;
5904 %}
5905 
5906 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
5907 %{
5908   single_instruction;
5909   dst    : S5(write);
5910   src1   : S1(read);
5911   src2   : S1(read);
5912   INS0   : ISS;
5913   NEON_FP : S5;
5914 %}
5915 
5916 pipe_class vsqrt_fp128(vecX dst, vecX src)
5917 %{
5918   single_instruction;
5919   dst    : S5(write);
5920   src    : S1(read);
5921   INS0   : ISS;
5922   NEON_FP : S5;
5923 %}
5924 
5925 pipe_class vunop_fp64(vecD dst, vecD src)
5926 %{
5927   single_instruction;
5928   dst    : S5(write);
5929   src    : S1(read);
5930   INS01  : ISS;
5931   NEON_FP : S5;
5932 %}
5933 
5934 pipe_class vunop_fp128(vecX dst, vecX src)
5935 %{
5936   single_instruction;
5937   dst    : S5(write);
5938   src    : S1(read);
5939   INS0   : ISS;
5940   NEON_FP : S5;
5941 %}
5942 
5943 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
5944 %{
5945   single_instruction;
5946   dst    : S3(write);
5947   src    : S1(read);
5948   INS01  : ISS;
5949   NEON_FP : S3;
5950 %}
5951 
5952 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
5953 %{
5954   single_instruction;
5955   dst    : S3(write);
5956   src    : S1(read);
5957   INS01  : ISS;
5958   NEON_FP : S3;
5959 %}
5960 
5961 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
5962 %{
5963   single_instruction;
5964   dst    : S3(write);
5965   src    : S1(read);
5966   INS01  : ISS;
5967   NEON_FP : S3;
5968 %}
5969 
5970 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
5971 %{
5972   single_instruction;
5973   dst    : S3(write);
5974   src    : S1(read);
5975   INS01  : ISS;
5976   NEON_FP : S3;
5977 %}
5978 
5979 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
5980 %{
5981   single_instruction;
5982   dst    : S3(write);
5983   src    : S1(read);
5984   INS01  : ISS;
5985   NEON_FP : S3;
5986 %}
5987 
5988 pipe_class vmovi_reg_imm64(vecD dst)
5989 %{
5990   single_instruction;
5991   dst    : S3(write);
5992   INS01  : ISS;
5993   NEON_FP : S3;
5994 %}
5995 
5996 pipe_class vmovi_reg_imm128(vecX dst)
5997 %{
5998   single_instruction;
5999   dst    : S3(write);
6000   INS0   : ISS;
6001   NEON_FP : S3;
6002 %}
6003 
6004 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
6005 %{
6006   single_instruction;
6007   dst    : S5(write);
6008   mem    : ISS(read);
6009   INS01  : ISS;
6010   NEON_FP : S3;
6011 %}
6012 
6013 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
6014 %{
6015   single_instruction;
6016   dst    : S5(write);
6017   mem    : ISS(read);
6018   INS01  : ISS;
6019   NEON_FP : S3;
6020 %}
6021 
6022 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
6023 %{
6024   single_instruction;
6025   mem    : ISS(read);
6026   src    : S2(read);
6027   INS01  : ISS;
6028   NEON_FP : S3;
6029 %}
6030 
6031 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
6032 %{
6033   single_instruction;
6034   mem    : ISS(read);
6035   src    : S2(read);
6036   INS01  : ISS;
6037   NEON_FP : S3;
6038 %}
6039 
6040 //------- Integer ALU operations --------------------------
6041 
6042 // Integer ALU reg-reg operation
6043 // Operands needed in EX1, result generated in EX2
6044 // Eg.  ADD     x0, x1, x2
6045 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6046 %{
6047   single_instruction;
6048   dst    : EX2(write);
6049   src1   : EX1(read);
6050   src2   : EX1(read);
6051   INS01  : ISS; // Dual issue as instruction 0 or 1
6052   ALU    : EX2;
6053 %}
6054 
6055 // Integer ALU reg-reg operation with constant shift
6056 // Shifted register must be available in LATE_ISS instead of EX1
6057 // Eg.  ADD     x0, x1, x2, LSL #2
6058 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6059 %{
6060   single_instruction;
6061   dst    : EX2(write);
6062   src1   : EX1(read);
6063   src2   : ISS(read);
6064   INS01  : ISS;
6065   ALU    : EX2;
6066 %}
6067 
6068 // Integer ALU reg operation with constant shift
6069 // Eg.  LSL     x0, x1, #shift
6070 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6071 %{
6072   single_instruction;
6073   dst    : EX2(write);
6074   src1   : ISS(read);
6075   INS01  : ISS;
6076   ALU    : EX2;
6077 %}
6078 
6079 // Integer ALU reg-reg operation with variable shift
6080 // Both operands must be available in LATE_ISS instead of EX1
6081 // Result is available in EX1 instead of EX2
6082 // Eg.  LSLV    x0, x1, x2
6083 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6084 %{
6085   single_instruction;
6086   dst    : EX1(write);
6087   src1   : ISS(read);
6088   src2   : ISS(read);
6089   INS01  : ISS;
6090   ALU    : EX1;
6091 %}
6092 
6093 // Integer ALU reg-reg operation with extract
6094 // As for _vshift above, but result generated in EX2
6095 // Eg.  EXTR    x0, x1, x2, #N
6096 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
6097 %{
6098   single_instruction;
6099   dst    : EX2(write);
6100   src1   : ISS(read);
6101   src2   : ISS(read);
6102   INS1   : ISS; // Can only dual issue as Instruction 1
6103   ALU    : EX1;
6104 %}
6105 
6106 // Integer ALU reg operation
6107 // Eg.  NEG     x0, x1
6108 pipe_class ialu_reg(iRegI dst, iRegI src)
6109 %{
6110   single_instruction;
6111   dst    : EX2(write);
6112   src    : EX1(read);
6113   INS01  : ISS;
6114   ALU    : EX2;
6115 %}
6116 
6117 // Integer ALU reg mmediate operation
6118 // Eg.  ADD     x0, x1, #N
6119 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6120 %{
6121   single_instruction;
6122   dst    : EX2(write);
6123   src1   : EX1(read);
6124   INS01  : ISS;
6125   ALU    : EX2;
6126 %}
6127 
6128 // Integer ALU immediate operation (no source operands)
6129 // Eg.  MOV     x0, #N
6130 pipe_class ialu_imm(iRegI dst)
6131 %{
6132   single_instruction;
6133   dst    : EX1(write);
6134   INS01  : ISS;
6135   ALU    : EX1;
6136 %}
6137 
6138 //------- Compare operation -------------------------------
6139 
6140 // Compare reg-reg
6141 // Eg.  CMP     x0, x1
6142 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6143 %{
6144   single_instruction;
6145 //  fixed_latency(16);
6146   cr     : EX2(write);
6147   op1    : EX1(read);
6148   op2    : EX1(read);
6149   INS01  : ISS;
6150   ALU    : EX2;
6151 %}
6152 
6153 // Compare reg-reg
6154 // Eg.  CMP     x0, #N
6155 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6156 %{
6157   single_instruction;
6158 //  fixed_latency(16);
6159   cr     : EX2(write);
6160   op1    : EX1(read);
6161   INS01  : ISS;
6162   ALU    : EX2;
6163 %}
6164 
6165 //------- Conditional instructions ------------------------
6166 
6167 // Conditional no operands
6168 // Eg.  CSINC   x0, zr, zr, <cond>
6169 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6170 %{
6171   single_instruction;
6172   cr     : EX1(read);
6173   dst    : EX2(write);
6174   INS01  : ISS;
6175   ALU    : EX2;
6176 %}
6177 
6178 // Conditional 2 operand
6179 // EG.  CSEL    X0, X1, X2, <cond>
6180 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6181 %{
6182   single_instruction;
6183   cr     : EX1(read);
6184   src1   : EX1(read);
6185   src2   : EX1(read);
6186   dst    : EX2(write);
6187   INS01  : ISS;
6188   ALU    : EX2;
6189 %}
6190 
6191 // Conditional 2 operand
6192 // EG.  CSEL    X0, X1, X2, <cond>
6193 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6194 %{
6195   single_instruction;
6196   cr     : EX1(read);
6197   src    : EX1(read);
6198   dst    : EX2(write);
6199   INS01  : ISS;
6200   ALU    : EX2;
6201 %}
6202 
6203 //------- Multiply pipeline operations --------------------
6204 
6205 // Multiply reg-reg
6206 // Eg.  MUL     w0, w1, w2
6207 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6208 %{
6209   single_instruction;
6210   dst    : WR(write);
6211   src1   : ISS(read);
6212   src2   : ISS(read);
6213   INS01  : ISS;
6214   MAC    : WR;
6215 %}
6216 
6217 // Multiply accumulate
6218 // Eg.  MADD    w0, w1, w2, w3
6219 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6220 %{
6221   single_instruction;
6222   dst    : WR(write);
6223   src1   : ISS(read);
6224   src2   : ISS(read);
6225   src3   : ISS(read);
6226   INS01  : ISS;
6227   MAC    : WR;
6228 %}
6229 
6230 // Eg.  MUL     w0, w1, w2
6231 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6232 %{
6233   single_instruction;
6234   fixed_latency(3); // Maximum latency for 64 bit mul
6235   dst    : WR(write);
6236   src1   : ISS(read);
6237   src2   : ISS(read);
6238   INS01  : ISS;
6239   MAC    : WR;
6240 %}
6241 
6242 // Multiply accumulate
6243 // Eg.  MADD    w0, w1, w2, w3
6244 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6245 %{
6246   single_instruction;
6247   fixed_latency(3); // Maximum latency for 64 bit mul
6248   dst    : WR(write);
6249   src1   : ISS(read);
6250   src2   : ISS(read);
6251   src3   : ISS(read);
6252   INS01  : ISS;
6253   MAC    : WR;
6254 %}
6255 
6256 //------- Divide pipeline operations --------------------
6257 
6258 // Eg.  SDIV    w0, w1, w2
6259 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6260 %{
6261   single_instruction;
6262   fixed_latency(8); // Maximum latency for 32 bit divide
6263   dst    : WR(write);
6264   src1   : ISS(read);
6265   src2   : ISS(read);
6266   INS0   : ISS; // Can only dual issue as instruction 0
6267   DIV    : WR;
6268 %}
6269 
6270 // Eg.  SDIV    x0, x1, x2
6271 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6272 %{
6273   single_instruction;
6274   fixed_latency(16); // Maximum latency for 64 bit divide
6275   dst    : WR(write);
6276   src1   : ISS(read);
6277   src2   : ISS(read);
6278   INS0   : ISS; // Can only dual issue as instruction 0
6279   DIV    : WR;
6280 %}
6281 
6282 //------- Load pipeline operations ------------------------
6283 
6284 // Load - prefetch
6285 // Eg.  PFRM    <mem>
6286 pipe_class iload_prefetch(memory mem)
6287 %{
6288   single_instruction;
6289   mem    : ISS(read);
6290   INS01  : ISS;
6291   LDST   : WR;
6292 %}
6293 
6294 // Load - reg, mem
6295 // Eg.  LDR     x0, <mem>
6296 pipe_class iload_reg_mem(iRegI dst, memory mem)
6297 %{
6298   single_instruction;
6299   dst    : WR(write);
6300   mem    : ISS(read);
6301   INS01  : ISS;
6302   LDST   : WR;
6303 %}
6304 
6305 // Load - reg, reg
6306 // Eg.  LDR     x0, [sp, x1]
6307 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6308 %{
6309   single_instruction;
6310   dst    : WR(write);
6311   src    : ISS(read);
6312   INS01  : ISS;
6313   LDST   : WR;
6314 %}
6315 
6316 //------- Store pipeline operations -----------------------
6317 
6318 // Store - zr, mem
6319 // Eg.  STR     zr, <mem>
6320 pipe_class istore_mem(memory mem)
6321 %{
6322   single_instruction;
6323   mem    : ISS(read);
6324   INS01  : ISS;
6325   LDST   : WR;
6326 %}
6327 
6328 // Store - reg, mem
6329 // Eg.  STR     x0, <mem>
6330 pipe_class istore_reg_mem(iRegI src, memory mem)
6331 %{
6332   single_instruction;
6333   mem    : ISS(read);
6334   src    : EX2(read);
6335   INS01  : ISS;
6336   LDST   : WR;
6337 %}
6338 
6339 // Store - reg, reg
6340 // Eg. STR      x0, [sp, x1]
6341 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6342 %{
6343   single_instruction;
6344   dst    : ISS(read);
6345   src    : EX2(read);
6346   INS01  : ISS;
6347   LDST   : WR;
6348 %}
6349 
6350 //------- Branch pipeline operations ----------------------
6351 
6352 // Branch
6353 pipe_class pipe_branch()
6354 %{
6355   single_instruction;
6356   INS01  : ISS;
6357   BRANCH : EX1;
6358 %}
6359 
6360 // Conditional branch
6361 pipe_class pipe_branch_cond(rFlagsReg cr)
6362 %{
6363   single_instruction;
6364   cr     : EX1(read);
6365   INS01  : ISS;
6366   BRANCH : EX1;
6367 %}
6368 
6369 // Compare & Branch
6370 // EG.  CBZ/CBNZ
6371 pipe_class pipe_cmp_branch(iRegI op1)
6372 %{
6373   single_instruction;
6374   op1    : EX1(read);
6375   INS01  : ISS;
6376   BRANCH : EX1;
6377 %}
6378 
6379 //------- Synchronisation operations ----------------------
6380 
6381 // Any operation requiring serialization.
6382 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
6383 pipe_class pipe_serial()
6384 %{
6385   single_instruction;
6386   force_serialization;
6387   fixed_latency(16);
6388   INS01  : ISS(2); // Cannot dual issue with any other instruction
6389   LDST   : WR;
6390 %}
6391 
6392 // Generic big/slow expanded idiom - also serialized
6393 pipe_class pipe_slow()
6394 %{
6395   instruction_count(10);
6396   multiple_bundles;
6397   force_serialization;
6398   fixed_latency(16);
6399   INS01  : ISS(2); // Cannot dual issue with any other instruction
6400   LDST   : WR;
6401 %}
6402 
6403 // Empty pipeline class
6404 pipe_class pipe_class_empty()
6405 %{
6406   single_instruction;
6407   fixed_latency(0);
6408 %}
6409 
6410 // Default pipeline class.
6411 pipe_class pipe_class_default()
6412 %{
6413   single_instruction;
6414   fixed_latency(2);
6415 %}
6416 
6417 // Pipeline class for compares.
6418 pipe_class pipe_class_compare()
6419 %{
6420   single_instruction;
6421   fixed_latency(16);
6422 %}
6423 
6424 // Pipeline class for memory operations.
6425 pipe_class pipe_class_memory()
6426 %{
6427   single_instruction;
6428   fixed_latency(16);
6429 %}
6430 
6431 // Pipeline class for call.
6432 pipe_class pipe_class_call()
6433 %{
6434   single_instruction;
6435   fixed_latency(100);
6436 %}
6437 
6438 // Define the class for the Nop node.
6439 define %{
6440    MachNop = pipe_class_empty;
6441 %}
6442 
6443 %}
6444 //----------INSTRUCTIONS-------------------------------------------------------
6445 //
6446 // match      -- States which machine-independent subtree may be replaced
6447 //               by this instruction.
6448 // ins_cost   -- The estimated cost of this instruction is used by instruction
6449 //               selection to identify a minimum cost tree of machine
6450 //               instructions that matches a tree of machine-independent
6451 //               instructions.
6452 // format     -- A string providing the disassembly for this instruction.
6453 //               The value of an instruction's operand may be inserted
6454 //               by referring to it with a '$' prefix.
6455 // opcode     -- Three instruction opcodes may be provided.  These are referred
6456 //               to within an encode class as $primary, $secondary, and $tertiary
6457 //               rrspectively.  The primary opcode is commonly used to
6458 //               indicate the type of machine instruction, while secondary
6459 //               and tertiary are often used for prefix options or addressing
6460 //               modes.
6461 // ins_encode -- A list of encode classes with parameters. The encode class
6462 //               name must have been defined in an 'enc_class' specification
6463 //               in the encode section of the architecture description.
6464 
6465 // ============================================================================
6466 // Memory (Load/Store) Instructions
6467 
6468 // Load Instructions
6469 
6470 // Load Byte (8 bit signed)
6471 instruct loadB(iRegINoSp dst, memory mem)
6472 %{
6473   match(Set dst (LoadB mem));
6474   predicate(!needs_acquiring_load(n));
6475 
6476   ins_cost(4 * INSN_COST);
6477   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6478 
6479   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6480 
6481   ins_pipe(iload_reg_mem);
6482 %}
6483 
6484 // Load Byte (8 bit signed) into long
6485 instruct loadB2L(iRegLNoSp dst, memory mem)
6486 %{
6487   match(Set dst (ConvI2L (LoadB mem)));
6488   predicate(!needs_acquiring_load(n->in(1)));
6489 
6490   ins_cost(4 * INSN_COST);
6491   format %{ "ldrsb  $dst, $mem\t# byte" %}
6492 
6493   ins_encode(aarch64_enc_ldrsb(dst, mem));
6494 
6495   ins_pipe(iload_reg_mem);
6496 %}
6497 
6498 // Load Byte (8 bit unsigned)
6499 instruct loadUB(iRegINoSp dst, memory mem)
6500 %{
6501   match(Set dst (LoadUB mem));
6502   predicate(!needs_acquiring_load(n));
6503 
6504   ins_cost(4 * INSN_COST);
6505   format %{ "ldrbw  $dst, $mem\t# byte" %}
6506 
6507   ins_encode(aarch64_enc_ldrb(dst, mem));
6508 
6509   ins_pipe(iload_reg_mem);
6510 %}
6511 
6512 // Load Byte (8 bit unsigned) into long
6513 instruct loadUB2L(iRegLNoSp dst, memory mem)
6514 %{
6515   match(Set dst (ConvI2L (LoadUB mem)));
6516   predicate(!needs_acquiring_load(n->in(1)));
6517 
6518   ins_cost(4 * INSN_COST);
6519   format %{ "ldrb  $dst, $mem\t# byte" %}
6520 
6521   ins_encode(aarch64_enc_ldrb(dst, mem));
6522 
6523   ins_pipe(iload_reg_mem);
6524 %}
6525 
6526 // Load Short (16 bit signed)
6527 instruct loadS(iRegINoSp dst, memory mem)
6528 %{
6529   match(Set dst (LoadS mem));
6530   predicate(!needs_acquiring_load(n));
6531 
6532   ins_cost(4 * INSN_COST);
6533   format %{ "ldrshw  $dst, $mem\t# short" %}
6534 
6535   ins_encode(aarch64_enc_ldrshw(dst, mem));
6536 
6537   ins_pipe(iload_reg_mem);
6538 %}
6539 
6540 // Load Short (16 bit signed) into long
6541 instruct loadS2L(iRegLNoSp dst, memory mem)
6542 %{
6543   match(Set dst (ConvI2L (LoadS mem)));
6544   predicate(!needs_acquiring_load(n->in(1)));
6545 
6546   ins_cost(4 * INSN_COST);
6547   format %{ "ldrsh  $dst, $mem\t# short" %}
6548 
6549   ins_encode(aarch64_enc_ldrsh(dst, mem));
6550 
6551   ins_pipe(iload_reg_mem);
6552 %}
6553 
6554 // Load Char (16 bit unsigned)
6555 instruct loadUS(iRegINoSp dst, memory mem)
6556 %{
6557   match(Set dst (LoadUS mem));
6558   predicate(!needs_acquiring_load(n));
6559 
6560   ins_cost(4 * INSN_COST);
6561   format %{ "ldrh  $dst, $mem\t# short" %}
6562 
6563   ins_encode(aarch64_enc_ldrh(dst, mem));
6564 
6565   ins_pipe(iload_reg_mem);
6566 %}
6567 
6568 // Load Short/Char (16 bit unsigned) into long
6569 instruct loadUS2L(iRegLNoSp dst, memory mem)
6570 %{
6571   match(Set dst (ConvI2L (LoadUS mem)));
6572   predicate(!needs_acquiring_load(n->in(1)));
6573 
6574   ins_cost(4 * INSN_COST);
6575   format %{ "ldrh  $dst, $mem\t# short" %}
6576 
6577   ins_encode(aarch64_enc_ldrh(dst, mem));
6578 
6579   ins_pipe(iload_reg_mem);
6580 %}
6581 
6582 // Load Integer (32 bit signed)
6583 instruct loadI(iRegINoSp dst, memory mem)
6584 %{
6585   match(Set dst (LoadI mem));
6586   predicate(!needs_acquiring_load(n));
6587 
6588   ins_cost(4 * INSN_COST);
6589   format %{ "ldrw  $dst, $mem\t# int" %}
6590 
6591   ins_encode(aarch64_enc_ldrw(dst, mem));
6592 
6593   ins_pipe(iload_reg_mem);
6594 %}
6595 
6596 // Load Integer (32 bit signed) into long
6597 instruct loadI2L(iRegLNoSp dst, memory mem)
6598 %{
6599   match(Set dst (ConvI2L (LoadI mem)));
6600   predicate(!needs_acquiring_load(n->in(1)));
6601 
6602   ins_cost(4 * INSN_COST);
6603   format %{ "ldrsw  $dst, $mem\t# int" %}
6604 
6605   ins_encode(aarch64_enc_ldrsw(dst, mem));
6606 
6607   ins_pipe(iload_reg_mem);
6608 %}
6609 
6610 // Load Integer (32 bit unsigned) into long
6611 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6612 %{
6613   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6614   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6615 
6616   ins_cost(4 * INSN_COST);
6617   format %{ "ldrw  $dst, $mem\t# int" %}
6618 
6619   ins_encode(aarch64_enc_ldrw(dst, mem));
6620 
6621   ins_pipe(iload_reg_mem);
6622 %}
6623 
6624 // Load Long (64 bit signed)
6625 instruct loadL(iRegLNoSp dst, memory mem)
6626 %{
6627   match(Set dst (LoadL mem));
6628   predicate(!needs_acquiring_load(n));
6629 
6630   ins_cost(4 * INSN_COST);
6631   format %{ "ldr  $dst, $mem\t# int" %}
6632 
6633   ins_encode(aarch64_enc_ldr(dst, mem));
6634 
6635   ins_pipe(iload_reg_mem);
6636 %}
6637 
6638 // Load Range
6639 instruct loadRange(iRegINoSp dst, memory mem)
6640 %{
6641   match(Set dst (LoadRange mem));
6642 
6643   ins_cost(4 * INSN_COST);
6644   format %{ "ldrw  $dst, $mem\t# range" %}
6645 
6646   ins_encode(aarch64_enc_ldrw(dst, mem));
6647 
6648   ins_pipe(iload_reg_mem);
6649 %}
6650 
6651 // Load Pointer
6652 instruct loadP(iRegPNoSp dst, memory mem)
6653 %{
6654   match(Set dst (LoadP mem));
6655   predicate(!needs_acquiring_load(n));
6656 
6657   ins_cost(4 * INSN_COST);
6658   format %{ "ldr  $dst, $mem\t# ptr" %}
6659 
6660   ins_encode(aarch64_enc_ldr(dst, mem));
6661 
6662   ins_pipe(iload_reg_mem);
6663 %}
6664 
6665 // Load Compressed Pointer
6666 instruct loadN(iRegNNoSp dst, memory mem)
6667 %{
6668   match(Set dst (LoadN mem));
6669   predicate(!needs_acquiring_load(n));
6670 
6671   ins_cost(4 * INSN_COST);
6672   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6673 
6674   ins_encode(aarch64_enc_ldrw(dst, mem));
6675 
6676   ins_pipe(iload_reg_mem);
6677 %}
6678 
6679 // Load Klass Pointer
6680 instruct loadKlass(iRegPNoSp dst, memory mem)
6681 %{
6682   match(Set dst (LoadKlass mem));
6683   predicate(!needs_acquiring_load(n));
6684 
6685   ins_cost(4 * INSN_COST);
6686   format %{ "ldr  $dst, $mem\t# class" %}
6687 
6688   ins_encode(aarch64_enc_ldr(dst, mem));
6689 
6690   ins_pipe(iload_reg_mem);
6691 %}
6692 
6693 // Load Narrow Klass Pointer
6694 instruct loadNKlass(iRegNNoSp dst, memory mem)
6695 %{
6696   match(Set dst (LoadNKlass mem));
6697   predicate(!needs_acquiring_load(n));
6698 
6699   ins_cost(4 * INSN_COST);
6700   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6701 
6702   ins_encode(aarch64_enc_ldrw(dst, mem));
6703 
6704   ins_pipe(iload_reg_mem);
6705 %}
6706 
6707 // Load Float
6708 instruct loadF(vRegF dst, memory mem)
6709 %{
6710   match(Set dst (LoadF mem));
6711   predicate(!needs_acquiring_load(n));
6712 
6713   ins_cost(4 * INSN_COST);
6714   format %{ "ldrs  $dst, $mem\t# float" %}
6715 
6716   ins_encode( aarch64_enc_ldrs(dst, mem) );
6717 
6718   ins_pipe(pipe_class_memory);
6719 %}
6720 
6721 // Load Double
6722 instruct loadD(vRegD dst, memory mem)
6723 %{
6724   match(Set dst (LoadD mem));
6725   predicate(!needs_acquiring_load(n));
6726 
6727   ins_cost(4 * INSN_COST);
6728   format %{ "ldrd  $dst, $mem\t# double" %}
6729 
6730   ins_encode( aarch64_enc_ldrd(dst, mem) );
6731 
6732   ins_pipe(pipe_class_memory);
6733 %}
6734 
6735 
6736 // Load Int Constant
6737 instruct loadConI(iRegINoSp dst, immI src)
6738 %{
6739   match(Set dst src);
6740 
6741   ins_cost(INSN_COST);
6742   format %{ "mov $dst, $src\t# int" %}
6743 
6744   ins_encode( aarch64_enc_movw_imm(dst, src) );
6745 
6746   ins_pipe(ialu_imm);
6747 %}
6748 
6749 // Load Long Constant
6750 instruct loadConL(iRegLNoSp dst, immL src)
6751 %{
6752   match(Set dst src);
6753 
6754   ins_cost(INSN_COST);
6755   format %{ "mov $dst, $src\t# long" %}
6756 
6757   ins_encode( aarch64_enc_mov_imm(dst, src) );
6758 
6759   ins_pipe(ialu_imm);
6760 %}
6761 
6762 // Load Pointer Constant
6763 
6764 instruct loadConP(iRegPNoSp dst, immP con)
6765 %{
6766   match(Set dst con);
6767 
6768   ins_cost(INSN_COST * 4);
6769   format %{
6770     "mov  $dst, $con\t# ptr\n\t"
6771   %}
6772 
6773   ins_encode(aarch64_enc_mov_p(dst, con));
6774 
6775   ins_pipe(ialu_imm);
6776 %}
6777 
6778 // Load Null Pointer Constant
6779 
6780 instruct loadConP0(iRegPNoSp dst, immP0 con)
6781 %{
6782   match(Set dst con);
6783 
6784   ins_cost(INSN_COST);
6785   format %{ "mov  $dst, $con\t# NULL ptr" %}
6786 
6787   ins_encode(aarch64_enc_mov_p0(dst, con));
6788 
6789   ins_pipe(ialu_imm);
6790 %}
6791 
6792 // Load Pointer Constant One
6793 
6794 instruct loadConP1(iRegPNoSp dst, immP_1 con)
6795 %{
6796   match(Set dst con);
6797 
6798   ins_cost(INSN_COST);
6799   format %{ "mov  $dst, $con\t# NULL ptr" %}
6800 
6801   ins_encode(aarch64_enc_mov_p1(dst, con));
6802 
6803   ins_pipe(ialu_imm);
6804 %}
6805 
6806 // Load Poll Page Constant
6807 
6808 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6809 %{
6810   match(Set dst con);
6811 
6812   ins_cost(INSN_COST);
6813   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6814 
6815   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6816 
6817   ins_pipe(ialu_imm);
6818 %}
6819 
6820 // Load Byte Map Base Constant
6821 
6822 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6823 %{
6824   match(Set dst con);
6825 
6826   ins_cost(INSN_COST);
6827   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6828 
6829   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6830 
6831   ins_pipe(ialu_imm);
6832 %}
6833 
6834 // Load Narrow Pointer Constant
6835 
6836 instruct loadConN(iRegNNoSp dst, immN con)
6837 %{
6838   match(Set dst con);
6839 
6840   ins_cost(INSN_COST * 4);
6841   format %{ "mov  $dst, $con\t# compressed ptr" %}
6842 
6843   ins_encode(aarch64_enc_mov_n(dst, con));
6844 
6845   ins_pipe(ialu_imm);
6846 %}
6847 
6848 // Load Narrow Null Pointer Constant
6849 
6850 instruct loadConN0(iRegNNoSp dst, immN0 con)
6851 %{
6852   match(Set dst con);
6853 
6854   ins_cost(INSN_COST);
6855   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6856 
6857   ins_encode(aarch64_enc_mov_n0(dst, con));
6858 
6859   ins_pipe(ialu_imm);
6860 %}
6861 
6862 // Load Narrow Klass Constant
6863 
6864 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6865 %{
6866   match(Set dst con);
6867 
6868   ins_cost(INSN_COST);
6869   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6870 
6871   ins_encode(aarch64_enc_mov_nk(dst, con));
6872 
6873   ins_pipe(ialu_imm);
6874 %}
6875 
6876 // Load Packed Float Constant
6877 
6878 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6879   match(Set dst con);
6880   ins_cost(INSN_COST * 4);
6881   format %{ "fmovs  $dst, $con"%}
6882   ins_encode %{
6883     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6884   %}
6885 
6886   ins_pipe(fp_imm_s);
6887 %}
6888 
6889 // Load Float Constant
6890 
6891 instruct loadConF(vRegF dst, immF con) %{
6892   match(Set dst con);
6893 
6894   ins_cost(INSN_COST * 4);
6895 
6896   format %{
6897     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6898   %}
6899 
6900   ins_encode %{
6901     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6902   %}
6903 
6904   ins_pipe(fp_load_constant_s);
6905 %}
6906 
6907 // Load Packed Double Constant
6908 
6909 instruct loadConD_packed(vRegD dst, immDPacked con) %{
6910   match(Set dst con);
6911   ins_cost(INSN_COST);
6912   format %{ "fmovd  $dst, $con"%}
6913   ins_encode %{
6914     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
6915   %}
6916 
6917   ins_pipe(fp_imm_d);
6918 %}
6919 
6920 // Load Double Constant
6921 
6922 instruct loadConD(vRegD dst, immD con) %{
6923   match(Set dst con);
6924 
6925   ins_cost(INSN_COST * 5);
6926   format %{
6927     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6928   %}
6929 
6930   ins_encode %{
6931     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
6932   %}
6933 
6934   ins_pipe(fp_load_constant_d);
6935 %}
6936 
6937 // Store Instructions
6938 
6939 // Store CMS card-mark Immediate
// Store a zero card mark with the leading StoreStore barrier elided.
// The predicate proves the barrier is redundant for this node; when it
// fails, the ordered variant (storeimmCM0_ordered, below) matches
// instead and emits an explicit "dmb ishst".
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  // Stores the zero register (zr) as a single byte.
  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6953 
6954 // Store CMS card-mark Immediate with intervening StoreStore
6955 // needed when using CMS with no conditional card marking
6956 instruct storeimmCM0_ordered(immI0 zero, memory mem)
6957 %{
6958   match(Set mem (StoreCM mem zero));
6959 
6960   ins_cost(INSN_COST * 2);
6961   format %{ "storestore\n\t"
6962             "dmb ishst"
6963             "\n\tstrb zr, $mem\t# byte" %}
6964 
6965   ins_encode(aarch64_enc_strb0_ordered(mem));
6966 
6967   ins_pipe(istore_mem);
6968 %}
6969 
6970 // Store Byte
6971 instruct storeB(iRegIorL2I src, memory mem)
6972 %{
6973   match(Set mem (StoreB mem src));
6974   predicate(!needs_releasing_store(n));
6975 
6976   ins_cost(INSN_COST);
6977   format %{ "strb  $src, $mem\t# byte" %}
6978 
6979   ins_encode(aarch64_enc_strb(src, mem));
6980 
6981   ins_pipe(istore_reg_mem);
6982 %}
6983 
6984 
// Store zero byte. The encoding (aarch64_enc_strb0, shared with
// storeimmCM0) stores the zero register directly, so no source
// register operand is needed.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format string: it previously named a non-existent register
  // "rscractch2"; the encoding actually stores zr (cf. storeimmCM0).
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6997 
6998 // Store Char/Short
6999 instruct storeC(iRegIorL2I src, memory mem)
7000 %{
7001   match(Set mem (StoreC mem src));
7002   predicate(!needs_releasing_store(n));
7003 
7004   ins_cost(INSN_COST);
7005   format %{ "strh  $src, $mem\t# short" %}
7006 
7007   ins_encode(aarch64_enc_strh(src, mem));
7008 
7009   ins_pipe(istore_reg_mem);
7010 %}
7011 
7012 instruct storeimmC0(immI0 zero, memory mem)
7013 %{
7014   match(Set mem (StoreC mem zero));
7015   predicate(!needs_releasing_store(n));
7016 
7017   ins_cost(INSN_COST);
7018   format %{ "strh  zr, $mem\t# short" %}
7019 
7020   ins_encode(aarch64_enc_strh0(mem));
7021 
7022   ins_pipe(istore_mem);
7023 %}
7024 
7025 // Store Integer
7026 
7027 instruct storeI(iRegIorL2I src, memory mem)
7028 %{
7029   match(Set mem(StoreI mem src));
7030   predicate(!needs_releasing_store(n));
7031 
7032   ins_cost(INSN_COST);
7033   format %{ "strw  $src, $mem\t# int" %}
7034 
7035   ins_encode(aarch64_enc_strw(src, mem));
7036 
7037   ins_pipe(istore_reg_mem);
7038 %}
7039 
7040 instruct storeimmI0(immI0 zero, memory mem)
7041 %{
7042   match(Set mem(StoreI mem zero));
7043   predicate(!needs_releasing_store(n));
7044 
7045   ins_cost(INSN_COST);
7046   format %{ "strw  zr, $mem\t# int" %}
7047 
7048   ins_encode(aarch64_enc_strw0(mem));
7049 
7050   ins_pipe(istore_mem);
7051 %}
7052 
7053 // Store Long (64 bit signed)
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Comment fixed from "# int": this is a 64-bit long store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7066 
7067 // Store Long (64 bit signed)
// Store zero Long (64 bit) — stores the zero register directly.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Comment fixed from "# int": this is a 64-bit long store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7080 
7081 // Store Pointer
7082 instruct storeP(iRegP src, memory mem)
7083 %{
7084   match(Set mem (StoreP mem src));
7085   predicate(!needs_releasing_store(n));
7086 
7087   ins_cost(INSN_COST);
7088   format %{ "str  $src, $mem\t# ptr" %}
7089 
7090   ins_encode(aarch64_enc_str(src, mem));
7091 
7092   ins_pipe(istore_reg_mem);
7093 %}
7094 
7095 // Store Pointer
7096 instruct storeimmP0(immP0 zero, memory mem)
7097 %{
7098   match(Set mem (StoreP mem zero));
7099   predicate(!needs_releasing_store(n));
7100 
7101   ins_cost(INSN_COST);
7102   format %{ "str zr, $mem\t# ptr" %}
7103 
7104   ins_encode(aarch64_enc_str0(mem));
7105 
7106   ins_pipe(istore_mem);
7107 %}
7108 
7109 // Store Compressed Pointer
7110 instruct storeN(iRegN src, memory mem)
7111 %{
7112   match(Set mem (StoreN mem src));
7113   predicate(!needs_releasing_store(n));
7114 
7115   ins_cost(INSN_COST);
7116   format %{ "strw  $src, $mem\t# compressed ptr" %}
7117 
7118   ins_encode(aarch64_enc_strw(src, mem));
7119 
7120   ins_pipe(istore_reg_mem);
7121 %}
7122 
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  // When both the narrow oop base and narrow klass base are NULL,
  // rheapbase holds zero (see the format comment below), so it can be
  // stored directly as the compressed null value without materializing
  // an immediate. Only valid for plain (non-releasing) stores.
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7137 
7138 // Store Float
7139 instruct storeF(vRegF src, memory mem)
7140 %{
7141   match(Set mem (StoreF mem src));
7142   predicate(!needs_releasing_store(n));
7143 
7144   ins_cost(INSN_COST);
7145   format %{ "strs  $src, $mem\t# float" %}
7146 
7147   ins_encode( aarch64_enc_strs(src, mem) );
7148 
7149   ins_pipe(pipe_class_memory);
7150 %}
7151 
7152 // TODO
7153 // implement storeImmF0 and storeFImmPacked
7154 
7155 // Store Double
7156 instruct storeD(vRegD src, memory mem)
7157 %{
7158   match(Set mem (StoreD mem src));
7159   predicate(!needs_releasing_store(n));
7160 
7161   ins_cost(INSN_COST);
7162   format %{ "strd  $src, $mem\t# double" %}
7163 
7164   ins_encode( aarch64_enc_strd(src, mem) );
7165 
7166   ins_pipe(pipe_class_memory);
7167 %}
7168 
7169 // Store Compressed Klass Pointer
7170 instruct storeNKlass(iRegN src, memory mem)
7171 %{
7172   predicate(!needs_releasing_store(n));
7173   match(Set mem (StoreNKlass mem src));
7174 
7175   ins_cost(INSN_COST);
7176   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7177 
7178   ins_encode(aarch64_enc_strw(src, mem));
7179 
7180   ins_pipe(istore_reg_mem);
7181 %}
7182 
7183 // TODO
7184 // implement storeImmD0 and storeDImmPacked
7185 
7186 // prefetch instructions
7187 // Must be safe to execute with invalid address (cannot fault).
7188 
7189 instruct prefetchalloc( memory mem ) %{
7190   match(PrefetchAllocation mem);
7191 
7192   ins_cost(INSN_COST);
7193   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7194 
7195   ins_encode( aarch64_enc_prefetchw(mem) );
7196 
7197   ins_pipe(iload_prefetch);
7198 %}
7199 
7200 //  ---------------- volatile loads and stores ----------------
7201 
7202 // Load Byte (8 bit signed)
7203 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7204 %{
7205   match(Set dst (LoadB mem));
7206 
7207   ins_cost(VOLATILE_REF_COST);
7208   format %{ "ldarsb  $dst, $mem\t# byte" %}
7209 
7210   ins_encode(aarch64_enc_ldarsb(dst, mem));
7211 
7212   ins_pipe(pipe_serial);
7213 %}
7214 
7215 // Load Byte (8 bit signed) into long
7216 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7217 %{
7218   match(Set dst (ConvI2L (LoadB mem)));
7219 
7220   ins_cost(VOLATILE_REF_COST);
7221   format %{ "ldarsb  $dst, $mem\t# byte" %}
7222 
7223   ins_encode(aarch64_enc_ldarsb(dst, mem));
7224 
7225   ins_pipe(pipe_serial);
7226 %}
7227 
7228 // Load Byte (8 bit unsigned)
7229 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7230 %{
7231   match(Set dst (LoadUB mem));
7232 
7233   ins_cost(VOLATILE_REF_COST);
7234   format %{ "ldarb  $dst, $mem\t# byte" %}
7235 
7236   ins_encode(aarch64_enc_ldarb(dst, mem));
7237 
7238   ins_pipe(pipe_serial);
7239 %}
7240 
7241 // Load Byte (8 bit unsigned) into long
7242 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7243 %{
7244   match(Set dst (ConvI2L (LoadUB mem)));
7245 
7246   ins_cost(VOLATILE_REF_COST);
7247   format %{ "ldarb  $dst, $mem\t# byte" %}
7248 
7249   ins_encode(aarch64_enc_ldarb(dst, mem));
7250 
7251   ins_pipe(pipe_serial);
7252 %}
7253 
7254 // Load Short (16 bit signed)
7255 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7256 %{
7257   match(Set dst (LoadS mem));
7258 
7259   ins_cost(VOLATILE_REF_COST);
7260   format %{ "ldarshw  $dst, $mem\t# short" %}
7261 
7262   ins_encode(aarch64_enc_ldarshw(dst, mem));
7263 
7264   ins_pipe(pipe_serial);
7265 %}
7266 
7267 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7268 %{
7269   match(Set dst (LoadUS mem));
7270 
7271   ins_cost(VOLATILE_REF_COST);
7272   format %{ "ldarhw  $dst, $mem\t# short" %}
7273 
7274   ins_encode(aarch64_enc_ldarhw(dst, mem));
7275 
7276   ins_pipe(pipe_serial);
7277 %}
7278 
7279 // Load Short/Char (16 bit unsigned) into long
7280 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7281 %{
7282   match(Set dst (ConvI2L (LoadUS mem)));
7283 
7284   ins_cost(VOLATILE_REF_COST);
7285   format %{ "ldarh  $dst, $mem\t# short" %}
7286 
7287   ins_encode(aarch64_enc_ldarh(dst, mem));
7288 
7289   ins_pipe(pipe_serial);
7290 %}
7291 
7292 // Load Short/Char (16 bit signed) into long
// Load Short/Char (16 bit signed) into long, acquiring semantics
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Format fixed to match the encoding: aarch64_enc_ldarsh emits the
  // sign-extending ldarsh, not the zero-extending ldarh shown before.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7304 
7305 // Load Integer (32 bit signed)
7306 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7307 %{
7308   match(Set dst (LoadI mem));
7309 
7310   ins_cost(VOLATILE_REF_COST);
7311   format %{ "ldarw  $dst, $mem\t# int" %}
7312 
7313   ins_encode(aarch64_enc_ldarw(dst, mem));
7314 
7315   ins_pipe(pipe_serial);
7316 %}
7317 
7318 // Load Integer (32 bit unsigned) into long
7319 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7320 %{
7321   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7322 
7323   ins_cost(VOLATILE_REF_COST);
7324   format %{ "ldarw  $dst, $mem\t# int" %}
7325 
7326   ins_encode(aarch64_enc_ldarw(dst, mem));
7327 
7328   ins_pipe(pipe_serial);
7329 %}
7330 
7331 // Load Long (64 bit signed)
// Load Long (64 bit signed), acquiring semantics
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Comment fixed from "# int": this is a 64-bit long load-acquire.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7343 
7344 // Load Pointer
7345 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7346 %{
7347   match(Set dst (LoadP mem));
7348 
7349   ins_cost(VOLATILE_REF_COST);
7350   format %{ "ldar  $dst, $mem\t# ptr" %}
7351 
7352   ins_encode(aarch64_enc_ldar(dst, mem));
7353 
7354   ins_pipe(pipe_serial);
7355 %}
7356 
7357 // Load Compressed Pointer
7358 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7359 %{
7360   match(Set dst (LoadN mem));
7361 
7362   ins_cost(VOLATILE_REF_COST);
7363   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7364 
7365   ins_encode(aarch64_enc_ldarw(dst, mem));
7366 
7367   ins_pipe(pipe_serial);
7368 %}
7369 
7370 // Load Float
7371 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7372 %{
7373   match(Set dst (LoadF mem));
7374 
7375   ins_cost(VOLATILE_REF_COST);
7376   format %{ "ldars  $dst, $mem\t# float" %}
7377 
7378   ins_encode( aarch64_enc_fldars(dst, mem) );
7379 
7380   ins_pipe(pipe_serial);
7381 %}
7382 
7383 // Load Double
7384 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7385 %{
7386   match(Set dst (LoadD mem));
7387 
7388   ins_cost(VOLATILE_REF_COST);
7389   format %{ "ldard  $dst, $mem\t# double" %}
7390 
7391   ins_encode( aarch64_enc_fldard(dst, mem) );
7392 
7393   ins_pipe(pipe_serial);
7394 %}
7395 
7396 // Store Byte
7397 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7398 %{
7399   match(Set mem (StoreB mem src));
7400 
7401   ins_cost(VOLATILE_REF_COST);
7402   format %{ "stlrb  $src, $mem\t# byte" %}
7403 
7404   ins_encode(aarch64_enc_stlrb(src, mem));
7405 
7406   ins_pipe(pipe_class_memory);
7407 %}
7408 
7409 // Store Char/Short
7410 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7411 %{
7412   match(Set mem (StoreC mem src));
7413 
7414   ins_cost(VOLATILE_REF_COST);
7415   format %{ "stlrh  $src, $mem\t# short" %}
7416 
7417   ins_encode(aarch64_enc_stlrh(src, mem));
7418 
7419   ins_pipe(pipe_class_memory);
7420 %}
7421 
7422 // Store Integer
7423 
7424 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7425 %{
7426   match(Set mem(StoreI mem src));
7427 
7428   ins_cost(VOLATILE_REF_COST);
7429   format %{ "stlrw  $src, $mem\t# int" %}
7430 
7431   ins_encode(aarch64_enc_stlrw(src, mem));
7432 
7433   ins_pipe(pipe_class_memory);
7434 %}
7435 
7436 // Store Long (64 bit signed)
// Store Long (64 bit signed), releasing semantics
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Comment fixed from "# int": this is a 64-bit long store-release.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7448 
7449 // Store Pointer
7450 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7451 %{
7452   match(Set mem (StoreP mem src));
7453 
7454   ins_cost(VOLATILE_REF_COST);
7455   format %{ "stlr  $src, $mem\t# ptr" %}
7456 
7457   ins_encode(aarch64_enc_stlr(src, mem));
7458 
7459   ins_pipe(pipe_class_memory);
7460 %}
7461 
7462 // Store Compressed Pointer
7463 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7464 %{
7465   match(Set mem (StoreN mem src));
7466 
7467   ins_cost(VOLATILE_REF_COST);
7468   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7469 
7470   ins_encode(aarch64_enc_stlrw(src, mem));
7471 
7472   ins_pipe(pipe_class_memory);
7473 %}
7474 
7475 // Store Float
7476 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7477 %{
7478   match(Set mem (StoreF mem src));
7479 
7480   ins_cost(VOLATILE_REF_COST);
7481   format %{ "stlrs  $src, $mem\t# float" %}
7482 
7483   ins_encode( aarch64_enc_fstlrs(src, mem) );
7484 
7485   ins_pipe(pipe_class_memory);
7486 %}
7487 
7488 // TODO
7489 // implement storeImmF0 and storeFImmPacked
7490 
7491 // Store Double
7492 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7493 %{
7494   match(Set mem (StoreD mem src));
7495 
7496   ins_cost(VOLATILE_REF_COST);
7497   format %{ "stlrd  $src, $mem\t# double" %}
7498 
7499   ins_encode( aarch64_enc_fstlrd(src, mem) );
7500 
7501   ins_pipe(pipe_class_memory);
7502 %}
7503 
7504 //  ---------------- end of volatile loads and stores ----------------
7505 
7506 // ============================================================================
7507 // BSWAP Instructions
7508 
7509 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7510   match(Set dst (ReverseBytesI src));
7511 
7512   ins_cost(INSN_COST);
7513   format %{ "revw  $dst, $src" %}
7514 
7515   ins_encode %{
7516     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7517   %}
7518 
7519   ins_pipe(ialu_reg);
7520 %}
7521 
7522 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7523   match(Set dst (ReverseBytesL src));
7524 
7525   ins_cost(INSN_COST);
7526   format %{ "rev  $dst, $src" %}
7527 
7528   ins_encode %{
7529     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7530   %}
7531 
7532   ins_pipe(ialu_reg);
7533 %}
7534 
7535 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7536   match(Set dst (ReverseBytesUS src));
7537 
7538   ins_cost(INSN_COST);
7539   format %{ "rev16w  $dst, $src" %}
7540 
7541   ins_encode %{
7542     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7543   %}
7544 
7545   ins_pipe(ialu_reg);
7546 %}
7547 
7548 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7549   match(Set dst (ReverseBytesS src));
7550 
7551   ins_cost(INSN_COST);
7552   format %{ "rev16w  $dst, $src\n\t"
7553             "sbfmw $dst, $dst, #0, #15" %}
7554 
7555   ins_encode %{
7556     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7557     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7558   %}
7559 
7560   ins_pipe(ialu_reg);
7561 %}
7562 
7563 // ============================================================================
7564 // Zero Count Instructions
7565 
7566 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7567   match(Set dst (CountLeadingZerosI src));
7568 
7569   ins_cost(INSN_COST);
7570   format %{ "clzw  $dst, $src" %}
7571   ins_encode %{
7572     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7573   %}
7574 
7575   ins_pipe(ialu_reg);
7576 %}
7577 
7578 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7579   match(Set dst (CountLeadingZerosL src));
7580 
7581   ins_cost(INSN_COST);
7582   format %{ "clz   $dst, $src" %}
7583   ins_encode %{
7584     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7585   %}
7586 
7587   ins_pipe(ialu_reg);
7588 %}
7589 
7590 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7591   match(Set dst (CountTrailingZerosI src));
7592 
7593   ins_cost(INSN_COST * 2);
7594   format %{ "rbitw  $dst, $src\n\t"
7595             "clzw   $dst, $dst" %}
7596   ins_encode %{
7597     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7598     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7599   %}
7600 
7601   ins_pipe(ialu_reg);
7602 %}
7603 
7604 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7605   match(Set dst (CountTrailingZerosL src));
7606 
7607   ins_cost(INSN_COST * 2);
7608   format %{ "rbit   $dst, $src\n\t"
7609             "clz    $dst, $dst" %}
7610   ins_encode %{
7611     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7612     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7613   %}
7614 
7615   ins_pipe(ialu_reg);
7616 %}
7617 
7618 //---------- Population Count Instructions -------------------------------------
7619 //
7620 
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes back to $src (to clear its upper 32
    // bits) while the effect clause only declares TEMP tmp, so the
    // register allocator is not told $src is modified — confirm this
    // is safe or that $src needs a USE_DEF/TEMP effect.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    // Move the scalar into a SIMD register, count set bits per byte,
    // sum the byte counts across the vector, then move the result back.
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7642 
7643 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
7644   predicate(UsePopCountInstruction);
7645   match(Set dst (PopCountI (LoadI mem)));
7646   effect(TEMP tmp);
7647   ins_cost(INSN_COST * 13);
7648 
7649   format %{ "ldrs   $tmp, $mem\n\t"
7650             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7651             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7652             "mov    $dst, $tmp\t# vector (1D)" %}
7653   ins_encode %{
7654     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7655     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
7656                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7657     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7658     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7659     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7660   %}
7661 
7662   ins_pipe(pipe_class_default);
7663 %}
7664 
7665 // Note: Long.bitCount(long) returns an int.
7666 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7667   predicate(UsePopCountInstruction);
7668   match(Set dst (PopCountL src));
7669   effect(TEMP tmp);
7670   ins_cost(INSN_COST * 13);
7671 
7672   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
7673             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7674             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7675             "mov    $dst, $tmp\t# vector (1D)" %}
7676   ins_encode %{
7677     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
7678     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7679     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7680     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7681   %}
7682 
7683   ins_pipe(pipe_class_default);
7684 %}
7685 
7686 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
7687   predicate(UsePopCountInstruction);
7688   match(Set dst (PopCountL (LoadL mem)));
7689   effect(TEMP tmp);
7690   ins_cost(INSN_COST * 13);
7691 
7692   format %{ "ldrd   $tmp, $mem\n\t"
7693             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7694             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7695             "mov    $dst, $tmp\t# vector (1D)" %}
7696   ins_encode %{
7697     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7698     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
7699                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7700     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7701     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7702     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7703   %}
7704 
7705   ins_pipe(pipe_class_default);
7706 %}
7707 
7708 // ============================================================================
7709 // MemBar Instruction
7710 
7711 instruct load_fence() %{
7712   match(LoadFence);
7713   ins_cost(VOLATILE_REF_COST);
7714 
7715   format %{ "load_fence" %}
7716 
7717   ins_encode %{
7718     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7719   %}
7720   ins_pipe(pipe_serial);
7721 %}
7722 
7723 instruct unnecessary_membar_acquire() %{
7724   predicate(unnecessary_acquire(n));
7725   match(MemBarAcquire);
7726   ins_cost(0);
7727 
7728   format %{ "membar_acquire (elided)" %}
7729 
7730   ins_encode %{
7731     __ block_comment("membar_acquire (elided)");
7732   %}
7733 
7734   ins_pipe(pipe_class_empty);
7735 %}
7736 
7737 instruct membar_acquire() %{
7738   match(MemBarAcquire);
7739   ins_cost(VOLATILE_REF_COST);
7740 
7741   format %{ "membar_acquire\n\t"
7742             "dmb ish" %}
7743 
7744   ins_encode %{
7745     __ block_comment("membar_acquire");
7746     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7747   %}
7748 
7749   ins_pipe(pipe_serial);
7750 %}
7751 
7752 
7753 instruct membar_acquire_lock() %{
7754   match(MemBarAcquireLock);
7755   ins_cost(VOLATILE_REF_COST);
7756 
7757   format %{ "membar_acquire_lock (elided)" %}
7758 
7759   ins_encode %{
7760     __ block_comment("membar_acquire_lock (elided)");
7761   %}
7762 
7763   ins_pipe(pipe_serial);
7764 %}
7765 
7766 instruct store_fence() %{
7767   match(StoreFence);
7768   ins_cost(VOLATILE_REF_COST);
7769 
7770   format %{ "store_fence" %}
7771 
7772   ins_encode %{
7773     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7774   %}
7775   ins_pipe(pipe_serial);
7776 %}
7777 
7778 instruct unnecessary_membar_release() %{
7779   predicate(unnecessary_release(n));
7780   match(MemBarRelease);
7781   ins_cost(0);
7782 
7783   format %{ "membar_release (elided)" %}
7784 
7785   ins_encode %{
7786     __ block_comment("membar_release (elided)");
7787   %}
7788   ins_pipe(pipe_serial);
7789 %}
7790 
7791 instruct membar_release() %{
7792   match(MemBarRelease);
7793   ins_cost(VOLATILE_REF_COST);
7794 
7795   format %{ "membar_release\n\t"
7796             "dmb ish" %}
7797 
7798   ins_encode %{
7799     __ block_comment("membar_release");
7800     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7801   %}
7802   ins_pipe(pipe_serial);
7803 %}
7804 
7805 instruct membar_storestore() %{
7806   match(MemBarStoreStore);
7807   ins_cost(VOLATILE_REF_COST);
7808 
7809   format %{ "MEMBAR-store-store" %}
7810 
7811   ins_encode %{
7812     __ membar(Assembler::StoreStore);
7813   %}
7814   ins_pipe(pipe_serial);
7815 %}
7816 
7817 instruct membar_release_lock() %{
7818   match(MemBarReleaseLock);
7819   ins_cost(VOLATILE_REF_COST);
7820 
7821   format %{ "membar_release_lock (elided)" %}
7822 
7823   ins_encode %{
7824     __ block_comment("membar_release_lock (elided)");
7825   %}
7826 
7827   ins_pipe(pipe_serial);
7828 %}
7829 
7830 instruct unnecessary_membar_volatile() %{
7831   predicate(unnecessary_volatile(n));
7832   match(MemBarVolatile);
7833   ins_cost(0);
7834 
7835   format %{ "membar_volatile (elided)" %}
7836 
7837   ins_encode %{
7838     __ block_comment("membar_volatile (elided)");
7839   %}
7840 
7841   ins_pipe(pipe_serial);
7842 %}
7843 
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    // StoreLoad is the strongest ordering request; per the format
    // string above it is realized as a full "dmb ish" barrier.
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7858 
7859 // ============================================================================
7860 // Cast/Convert Instructions
7861 
7862 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7863   match(Set dst (CastX2P src));
7864 
7865   ins_cost(INSN_COST);
7866   format %{ "mov $dst, $src\t# long -> ptr" %}
7867 
7868   ins_encode %{
7869     if ($dst$$reg != $src$$reg) {
7870       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7871     }
7872   %}
7873 
7874   ins_pipe(ialu_reg);
7875 %}
7876 
7877 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7878   match(Set dst (CastP2X src));
7879 
7880   ins_cost(INSN_COST);
7881   format %{ "mov $dst, $src\t# ptr -> long" %}
7882 
7883   ins_encode %{
7884     if ($dst$$reg != $src$$reg) {
7885       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7886     }
7887   %}
7888 
7889   ins_pipe(ialu_reg);
7890 %}
7891 
7892 // Convert oop into int for vectors alignment masking
7893 instruct convP2I(iRegINoSp dst, iRegP src) %{
7894   match(Set dst (ConvL2I (CastP2X src)));
7895 
7896   ins_cost(INSN_COST);
7897   format %{ "movw $dst, $src\t# ptr -> int" %}
7898   ins_encode %{
7899     __ movw($dst$$Register, $src$$Register);
7900   %}
7901 
7902   ins_pipe(ialu_reg);
7903 %}
7904 
7905 // Convert compressed oop into int for vectors alignment masking
7906 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // With a zero narrow-oop shift, the compressed value already equals
  // the low 32 bits of the pointer, so a plain 32-bit register move
  // suffices.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Format fixed: restore the missing '$' on dst and show "movw" to
  // match the instruction actually emitted by the encoding.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7920 
7921 
7922 // Convert oop pointer into compressed form
7923 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7924   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
7925   match(Set dst (EncodeP src));
7926   effect(KILL cr);
7927   ins_cost(INSN_COST * 3);
7928   format %{ "encode_heap_oop $dst, $src" %}
7929   ins_encode %{
7930     Register s = $src$$Register;
7931     Register d = $dst$$Register;
7932     __ encode_heap_oop(d, s);
7933   %}
7934   ins_pipe(ialu_reg);
7935 %}
7936 
7937 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7938   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
7939   match(Set dst (EncodeP src));
7940   ins_cost(INSN_COST * 3);
7941   format %{ "encode_heap_oop_not_null $dst, $src" %}
7942   ins_encode %{
7943     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
7944   %}
7945   ins_pipe(ialu_reg);
7946 %}
7947 
7948 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7949   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
7950             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
7951   match(Set dst (DecodeN src));
7952   ins_cost(INSN_COST * 3);
7953   format %{ "decode_heap_oop $dst, $src" %}
7954   ins_encode %{
7955     Register s = $src$$Register;
7956     Register d = $dst$$Register;
7957     __ decode_heap_oop(d, s);
7958   %}
7959   ins_pipe(ialu_reg);
7960 %}
7961 
7962 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7963   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
7964             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
7965   match(Set dst (DecodeN src));
7966   ins_cost(INSN_COST * 3);
7967   format %{ "decode_heap_oop_not_null $dst, $src" %}
7968   ins_encode %{
7969     Register s = $src$$Register;
7970     Register d = $dst$$Register;
7971     __ decode_heap_oop_not_null(d, s);
7972   %}
7973   ins_pipe(ialu_reg);
7974 %}
7975 
7976 // n.b. AArch64 implementations of encode_klass_not_null and
7977 // decode_klass_not_null do not modify the flags register so, unlike
7978 // Intel, we don't kill CR as a side effect here
7979 
// Compress (EncodePKlass) / expand (DecodeNKlass) klass pointers.
// Klass pointers are never null, so only _not_null forms exist; per the
// note above these helpers do not clobber the flags, hence no cr
// operand and no KILL effect.
7980 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
7981   match(Set dst (EncodePKlass src));
7982 
7983   ins_cost(INSN_COST * 3);
7984   format %{ "encode_klass_not_null $dst,$src" %}
7985 
7986   ins_encode %{
7987     Register src_reg = as_Register($src$$reg);
7988     Register dst_reg = as_Register($dst$$reg);
7989     __ encode_klass_not_null(dst_reg, src_reg);
7990   %}
7991 
7992    ins_pipe(ialu_reg);
7993 %}
7994 
7995 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
7996   match(Set dst (DecodeNKlass src));
7997 
7998   ins_cost(INSN_COST * 3);
7999   format %{ "decode_klass_not_null $dst,$src" %}
8000 
8001   ins_encode %{
8002     Register src_reg = as_Register($src$$reg);
8003     Register dst_reg = as_Register($dst$$reg);
8004     // MacroAssembler provides a two-register and an in-place form;
8005     // the in-place (dst == src) case must use the single-register one.
8006     if (dst_reg != src_reg) {
8007       __ decode_klass_not_null(dst_reg, src_reg);
8008     } else {
8009       __ decode_klass_not_null(dst_reg);
8010     }
8011   %}
8012 
8013    ins_pipe(ialu_reg);
8014 %}
8013 
// CheckCastPP / CastPP / CastII are compile-time type-system markers
// only: each matches "Set dst (... dst)" in place, has size(0) and an
// empty encoding, so no machine code is emitted for them.
8014 instruct checkCastPP(iRegPNoSp dst)
8015 %{
8016   match(Set dst (CheckCastPP dst));
8017 
8018   size(0);
8019   format %{ "# checkcastPP of $dst" %}
8020   ins_encode(/* empty encoding */);
8021   ins_pipe(pipe_class_empty);
8022 %}
8023 
8024 instruct castPP(iRegPNoSp dst)
8025 %{
8026   match(Set dst (CastPP dst));
8027 
8028   size(0);
8029   format %{ "# castPP of $dst" %}
8030   ins_encode(/* empty encoding */);
8031   ins_pipe(pipe_class_empty);
8032 %}
8033 
8034 instruct castII(iRegI dst)
8035 %{
8036   match(Set dst (CastII dst));
8037 
8038   size(0);
8039   format %{ "# castII of $dst" %}
8040   ins_encode(/* empty encoding */);
8041   ins_cost(0);
8042   ins_pipe(pipe_class_empty);
8043 %}
8044 
8045 // ============================================================================
8046 // Atomic operation instructions
8047 //
8048 // Intel and SPARC both implement Ideal Node LoadPLocked and
8049 // Store{PIL}Conditional instructions using a normal load for the
8050 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8051 //
8052 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8053 // pair to lock object allocations from Eden space when not using
8054 // TLABs.
8055 //
8056 // There does not appear to be a Load{IL}Locked Ideal Node and the
8057 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8058 // and to use StoreIConditional only for 32-bit and StoreLConditional
8059 // only for 64-bit.
8060 //
8061 // We implement LoadPLocked and StorePLocked instructions using,
8062 // respectively, the AArch64 hw load-exclusive and store-conditional
8063 // instructions, whereas we must implement each of
8064 // Store{IL}Conditional using a CAS which employs a pair of
8065 // instructions comprising a load-exclusive followed by a
8066 // store-conditional.
8067 
8068 
8069 // Locked-load (linked load) of the current heap-top
8070 // used when updating the eden heap top
8071 // implemented using ldaxr on AArch64
8072 
// Load-exclusive of the heap top with acquire semantics (ldaxr); forms
// the first half of the LoadPLocked/StorePConditional allocation
// protocol described in the comment block above.
8073 instruct loadPLocked(iRegPNoSp dst, indirect mem)
8074 %{
8075   match(Set dst (LoadPLocked mem));
8076 
8077   ins_cost(VOLATILE_REF_COST);
8078 
8079   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
8080 
8081   ins_encode(aarch64_enc_ldaxr(dst, mem));
8082 
8083   ins_pipe(pipe_serial);
8084 %}
8085 
8086 // Conditional-store of the updated heap-top.
8087 // Used during allocation of the shared heap.
8088 // Sets flag (EQ) on success.
8089 // implemented using stlxr on AArch64.
8090 
// Store-exclusive with release semantics (stlxr) of newval to the heap
// top; cr ends up with EQ set on a successful store.  Pairs with
// loadPLocked above.
// NOTE(review): oldval is declared and matched but is not passed to the
// encoding -- the comparison is implicit in the exclusive monitor set
// up by the preceding loadPLocked; confirm this is the intent.
8091 instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
8092 %{
8093   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
8094 
8095   ins_cost(VOLATILE_REF_COST);
8096 
8097  // TODO
8098  // do we need to do a store-conditional release or can we just use a
8099  // plain store-conditional?
8100 
8101   format %{
8102     "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
8103     "cmpw rscratch1, zr\t# EQ on successful write"
8104   %}
8105 
8106   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
8107 
8108   ins_pipe(pipe_serial);
8109 %}
8110 
8111 
8112 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8113 // when attempting to rebias a lock towards the current thread.  We
8114 // must use the acquire form of cmpxchg in order to guarantee acquire
8115 // semantics in this case.
// Store{L,I}Conditional: implemented as a full CAS (load-exclusive +
// store-conditional) using the acquiring cmpxchg encodings, leaving EQ
// in cr on success.  See the comments above for why acquire semantics
// are used.
8116 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
8117 %{
8118   match(Set cr (StoreLConditional mem (Binary oldval newval)));
8119 
8120   ins_cost(VOLATILE_REF_COST);
8121 
8122   format %{
8123     "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
8124     "cmpw rscratch1, zr\t# EQ on successful write"
8125   %}
8126 
8127   ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));
8128 
8129   ins_pipe(pipe_slow);
8130 %}
8131 
8132 // storeIConditional also has acquire semantics, for no better reason
8133 // than matching storeLConditional.  At the time of writing this
8134 // comment storeIConditional was not used anywhere by AArch64.
8135 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
8136 %{
8137   match(Set cr (StoreIConditional mem (Binary oldval newval)));
8138 
8139   ins_cost(VOLATILE_REF_COST);
8140 
8141   format %{
8142     "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
8143     "cmpw rscratch1, zr\t# EQ on successful write"
8144   %}
8145 
8146   ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));
8147 
8148   ins_pipe(pipe_slow);
8149 %}
8150 
8151 // standard CompareAndSwapX when we are using barriers
8152 // these have higher priority than the rules selected by a predicate
8153 
8154 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8155 // can't match them
8156 
// Strong CompareAndSwap{B,S,I,L,P,N}: res (int 0/1) reports whether the
// value at [mem] equalled oldval and was replaced by newval.  The
// cmpxchg encodings leave the outcome in the condition flags and the
// following cset materializes it into res, hence effect(KILL cr).
// Byte and short operands travel in 32-bit registers, which is why
// their format strings comment the operation as "(int)".
8157 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8158 
8159   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
8160   ins_cost(2 * VOLATILE_REF_COST);
8161 
8162   effect(KILL cr);
8163 
8164   format %{
8165     "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8166     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8167   %}
8168 
8169   ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
8170             aarch64_enc_cset_eq(res));
8171 
8172   ins_pipe(pipe_slow);
8173 %}
8174 
8175 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8176 
8177   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
8178   ins_cost(2 * VOLATILE_REF_COST);
8179 
8180   effect(KILL cr);
8181 
8182   format %{
8183     "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8184     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8185   %}
8186 
8187   ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
8188             aarch64_enc_cset_eq(res));
8189 
8190   ins_pipe(pipe_slow);
8191 %}
8192 
8193 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8194 
8195   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
8196   ins_cost(2 * VOLATILE_REF_COST);
8197 
8198   effect(KILL cr);
8199 
8200  format %{
8201     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8202     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8203  %}
8204 
8205  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
8206             aarch64_enc_cset_eq(res));
8207 
8208   ins_pipe(pipe_slow);
8209 %}
8210 
8211 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8212 
8213   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8214   ins_cost(2 * VOLATILE_REF_COST);
8215 
8216   effect(KILL cr);
8217 
8218  format %{
8219     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8220     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8221  %}
8222 
8223  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8224             aarch64_enc_cset_eq(res));
8225 
8226   ins_pipe(pipe_slow);
8227 %}
8228 
8229 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8230 
8231   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8232   ins_cost(2 * VOLATILE_REF_COST);
8233 
8234   effect(KILL cr);
8235 
8236  format %{
8237     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8238     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8239  %}
8240 
8241  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8242             aarch64_enc_cset_eq(res));
8243 
8244   ins_pipe(pipe_slow);
8245 %}
8246 
8247 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8248 
8249   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8250   ins_cost(2 * VOLATILE_REF_COST);
8251 
8252   effect(KILL cr);
8253 
8254  format %{
8255     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8256     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8257  %}
8258 
8259  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
8260             aarch64_enc_cset_eq(res));
8261 
8262   ins_pipe(pipe_slow);
8263 %}
8264 
8265 // alternative CompareAndSwapX when we are eliding barriers
8266 
// Acquiring forms of the strong CAS rules above, selected when
// needs_acquiring_load_exclusive(n) holds (the CAS takes part in a
// volatile access sequence).  They use the *_acq encodings and carry a
// lower ins_cost so they win over the plain rules whenever the
// predicate matches.
8267 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8268 
8269   predicate(needs_acquiring_load_exclusive(n));
8270   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
8271   ins_cost(VOLATILE_REF_COST);
8272 
8273   effect(KILL cr);
8274 
8275   format %{
8276     "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8277     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8278   %}
8279 
8280   ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
8281             aarch64_enc_cset_eq(res));
8282 
8283   ins_pipe(pipe_slow);
8284 %}
8285 
8286 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8287 
8288   predicate(needs_acquiring_load_exclusive(n));
8289   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
8290   ins_cost(VOLATILE_REF_COST);
8291 
8292   effect(KILL cr);
8293 
8294   format %{
8295     "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8296     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8297   %}
8298 
8299   ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
8300             aarch64_enc_cset_eq(res));
8301 
8302   ins_pipe(pipe_slow);
8303 %}
8304 
8305 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8306 
8307   predicate(needs_acquiring_load_exclusive(n));
8308   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
8309   ins_cost(VOLATILE_REF_COST);
8310 
8311   effect(KILL cr);
8312 
8313  format %{
8314     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8315     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8316  %}
8317 
8318  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
8319             aarch64_enc_cset_eq(res));
8320 
8321   ins_pipe(pipe_slow);
8322 %}
8323 
8324 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8325 
8326   predicate(needs_acquiring_load_exclusive(n));
8327   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8328   ins_cost(VOLATILE_REF_COST);
8329 
8330   effect(KILL cr);
8331 
8332  format %{
8333     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8334     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8335  %}
8336 
8337  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
8338             aarch64_enc_cset_eq(res));
8339 
8340   ins_pipe(pipe_slow);
8341 %}
8342 
8343 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8344 
8345   predicate(needs_acquiring_load_exclusive(n));
8346   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8347   ins_cost(VOLATILE_REF_COST);
8348 
8349   effect(KILL cr);
8350 
8351  format %{
8352     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8353     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8354  %}
8355 
8356  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
8357             aarch64_enc_cset_eq(res));
8358 
8359   ins_pipe(pipe_slow);
8360 %}
8361 
8362 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8363 
8364   predicate(needs_acquiring_load_exclusive(n));
8365   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8366   ins_cost(VOLATILE_REF_COST);
8367 
8368   effect(KILL cr);
8369 
8370  format %{
8371     "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8372     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8373  %}
8374 
8375  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
8376             aarch64_enc_cset_eq(res));
8377 
8378   ins_pipe(pipe_slow);
8379 %}
8380 
8381 
8382 // ---------------------------------------------------------------------
8383 
8384 
8385 // BEGIN This section of the file is automatically generated. Do not edit --------------
8386 
8387 // Sundry CAS operations.  Note that release is always true,
8388 // regardless of the memory ordering of the CAS.  This is because we
8389 // need the volatile case to be sequentially consistent but there is
8390 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8391 // can't check the type of memory ordering here, so we always emit a
8392 // STLXR.
8393 
8394 // This section is generated from aarch64_ad_cas.m4
8395 
8396 
8397 
// CompareAndExchange{B,S,I,L,N,P}: like CAS but res returns the value
// previously found at [mem] rather than a success flag, hence
// effect(TEMP_DEF res) and no trailing cset.  Sub-word results are
// sign-extended to int (sxtbw/sxthw) to match Java semantics of the
// byte/short accessors.
// NOTE(review): the format strings say "weak" although the encodings
// pass /*weak*/ false; this section is generated, so any fix belongs
// in aarch64_ad_cas.m4, not here.
8398 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8399   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
8400   ins_cost(2 * VOLATILE_REF_COST);
8401   effect(TEMP_DEF res, KILL cr);
8402   format %{
8403     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8404   %}
8405   ins_encode %{
8406     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8407                Assembler::byte, /*acquire*/ false, /*release*/ true,
8408                /*weak*/ false, $res$$Register);
8409     __ sxtbw($res$$Register, $res$$Register);
8410   %}
8411   ins_pipe(pipe_slow);
8412 %}
8413 
8414 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8415   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
8416   ins_cost(2 * VOLATILE_REF_COST);
8417   effect(TEMP_DEF res, KILL cr);
8418   format %{
8419     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
8420   %}
8421   ins_encode %{
8422     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8423                Assembler::halfword, /*acquire*/ false, /*release*/ true,
8424                /*weak*/ false, $res$$Register);
8425     __ sxthw($res$$Register, $res$$Register);
8426   %}
8427   ins_pipe(pipe_slow);
8428 %}
8429 
8430 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8431   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8432   ins_cost(2 * VOLATILE_REF_COST);
8433   effect(TEMP_DEF res, KILL cr);
8434   format %{
8435     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
8436   %}
8437   ins_encode %{
8438     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8439                Assembler::word, /*acquire*/ false, /*release*/ true,
8440                /*weak*/ false, $res$$Register);
8441   %}
8442   ins_pipe(pipe_slow);
8443 %}
8444 
8445 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8446   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8447   ins_cost(2 * VOLATILE_REF_COST);
8448   effect(TEMP_DEF res, KILL cr);
8449   format %{
8450     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
8451   %}
8452   ins_encode %{
8453     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8454                Assembler::xword, /*acquire*/ false, /*release*/ true,
8455                /*weak*/ false, $res$$Register);
8456   %}
8457   ins_pipe(pipe_slow);
8458 %}
8459 
8460 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8461   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8462   ins_cost(2 * VOLATILE_REF_COST);
8463   effect(TEMP_DEF res, KILL cr);
8464   format %{
8465     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8466   %}
8467   ins_encode %{
8468     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8469                Assembler::word, /*acquire*/ false, /*release*/ true,
8470                /*weak*/ false, $res$$Register);
8471   %}
8472   ins_pipe(pipe_slow);
8473 %}
8474 
8475 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8476   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
8477   ins_cost(2 * VOLATILE_REF_COST);
8478   effect(TEMP_DEF res, KILL cr);
8479   format %{
8480     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
8481   %}
8482   ins_encode %{
8483     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8484                Assembler::xword, /*acquire*/ false, /*release*/ true,
8485                /*weak*/ false, $res$$Register);
8486   %}
8487   ins_pipe(pipe_slow);
8488 %}
8489 
// Acquiring forms of CompareAndExchangeX, selected by
// needs_acquiring_load_exclusive(n); identical except /*acquire*/ true
// and the lower ins_cost that gives them priority.  Generated section:
// edit aarch64_ad_cas.m4, not this file.
8490 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8491   predicate(needs_acquiring_load_exclusive(n));
8492   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
8493   ins_cost(VOLATILE_REF_COST);
8494   effect(TEMP_DEF res, KILL cr);
8495   format %{
8496     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8497   %}
8498   ins_encode %{
8499     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8500                Assembler::byte, /*acquire*/ true, /*release*/ true,
8501                /*weak*/ false, $res$$Register);
8502     __ sxtbw($res$$Register, $res$$Register);
8503   %}
8504   ins_pipe(pipe_slow);
8505 %}
8506 
8507 instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8508   predicate(needs_acquiring_load_exclusive(n));
8509   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
8510   ins_cost(VOLATILE_REF_COST);
8511   effect(TEMP_DEF res, KILL cr);
8512   format %{
8513     "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
8514   %}
8515   ins_encode %{
8516     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8517                Assembler::halfword, /*acquire*/ true, /*release*/ true,
8518                /*weak*/ false, $res$$Register);
8519     __ sxthw($res$$Register, $res$$Register);
8520   %}
8521   ins_pipe(pipe_slow);
8522 %}
8523 
8524 
8525 instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8526   predicate(needs_acquiring_load_exclusive(n));
8527   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8528   ins_cost(VOLATILE_REF_COST);
8529   effect(TEMP_DEF res, KILL cr);
8530   format %{
8531     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
8532   %}
8533   ins_encode %{
8534     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8535                Assembler::word, /*acquire*/ true, /*release*/ true,
8536                /*weak*/ false, $res$$Register);
8537   %}
8538   ins_pipe(pipe_slow);
8539 %}
8540 
8541 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8542   predicate(needs_acquiring_load_exclusive(n));
8543   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8544   ins_cost(VOLATILE_REF_COST);
8545   effect(TEMP_DEF res, KILL cr);
8546   format %{
8547     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
8548   %}
8549   ins_encode %{
8550     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8551                Assembler::xword, /*acquire*/ true, /*release*/ true,
8552                /*weak*/ false, $res$$Register);
8553   %}
8554   ins_pipe(pipe_slow);
8555 %}
8556 
8557 
8558 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8559   predicate(needs_acquiring_load_exclusive(n));
8560   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8561   ins_cost(VOLATILE_REF_COST);
8562   effect(TEMP_DEF res, KILL cr);
8563   format %{
8564     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8565   %}
8566   ins_encode %{
8567     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8568                Assembler::word, /*acquire*/ true, /*release*/ true,
8569                /*weak*/ false, $res$$Register);
8570   %}
8571   ins_pipe(pipe_slow);
8572 %}
8573 
8574 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8575   predicate(needs_acquiring_load_exclusive(n));
8576   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
8577   ins_cost(VOLATILE_REF_COST);
8578   effect(TEMP_DEF res, KILL cr);
8579   format %{
8580     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
8581   %}
8582   ins_encode %{
8583     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8584                Assembler::xword, /*acquire*/ true, /*release*/ true,
8585                /*weak*/ false, $res$$Register);
8586   %}
8587   ins_pipe(pipe_slow);
8588 %}
8589 
// WeakCompareAndSwap{B,S,I,L,N,P}: may fail spuriously (/*weak*/ true,
// a single exclusive attempt with no retry loop); res is the 0/1
// success flag produced by csetw, and the previously-held value is
// discarded (noreg).  Generated section: edit aarch64_ad_cas.m4.
8590 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8591   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
8592   ins_cost(2 * VOLATILE_REF_COST);
8593   effect(KILL cr);
8594   format %{
8595     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8596     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8597   %}
8598   ins_encode %{
8599     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8600                Assembler::byte, /*acquire*/ false, /*release*/ true,
8601                /*weak*/ true, noreg);
8602     __ csetw($res$$Register, Assembler::EQ);
8603   %}
8604   ins_pipe(pipe_slow);
8605 %}
8606 
8607 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8608   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
8609   ins_cost(2 * VOLATILE_REF_COST);
8610   effect(KILL cr);
8611   format %{
8612     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
8613     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8614   %}
8615   ins_encode %{
8616     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8617                Assembler::halfword, /*acquire*/ false, /*release*/ true,
8618                /*weak*/ true, noreg);
8619     __ csetw($res$$Register, Assembler::EQ);
8620   %}
8621   ins_pipe(pipe_slow);
8622 %}
8623 
8624 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8625   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
8626   ins_cost(2 * VOLATILE_REF_COST);
8627   effect(KILL cr);
8628   format %{
8629     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
8630     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8631   %}
8632   ins_encode %{
8633     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8634                Assembler::word, /*acquire*/ false, /*release*/ true,
8635                /*weak*/ true, noreg);
8636     __ csetw($res$$Register, Assembler::EQ);
8637   %}
8638   ins_pipe(pipe_slow);
8639 %}
8640 
8641 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8642   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
8643   ins_cost(2 * VOLATILE_REF_COST);
8644   effect(KILL cr);
8645   format %{
8646     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
8647     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8648   %}
8649   ins_encode %{
8650     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8651                Assembler::xword, /*acquire*/ false, /*release*/ true,
8652                /*weak*/ true, noreg);
8653     __ csetw($res$$Register, Assembler::EQ);
8654   %}
8655   ins_pipe(pipe_slow);
8656 %}
8657 
8658 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8659   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
8660   ins_cost(2 * VOLATILE_REF_COST);
8661   effect(KILL cr);
8662   format %{
8663     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8664     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8665   %}
8666   ins_encode %{
8667     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8668                Assembler::word, /*acquire*/ false, /*release*/ true,
8669                /*weak*/ true, noreg);
8670     __ csetw($res$$Register, Assembler::EQ);
8671   %}
8672   ins_pipe(pipe_slow);
8673 %}
8674 
8675 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8676   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
8677   ins_cost(2 * VOLATILE_REF_COST);
8678   effect(KILL cr);
8679   format %{
8680     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
8681     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8682   %}
8683   ins_encode %{
8684     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8685                Assembler::xword, /*acquire*/ false, /*release*/ true,
8686                /*weak*/ true, noreg);
8687     __ csetw($res$$Register, Assembler::EQ);
8688   %}
8689   ins_pipe(pipe_slow);
8690 %}
8691 
// Acquiring forms of WeakCompareAndSwapX, selected by
// needs_acquiring_load_exclusive(n); identical except /*acquire*/ true
// and the lower ins_cost.  Generated section: edit aarch64_ad_cas.m4.
8692 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8693   predicate(needs_acquiring_load_exclusive(n));
8694   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
8695   ins_cost(VOLATILE_REF_COST);
8696   effect(KILL cr);
8697   format %{
8698     "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8699     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8700   %}
8701   ins_encode %{
8702     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8703                Assembler::byte, /*acquire*/ true, /*release*/ true,
8704                /*weak*/ true, noreg);
8705     __ csetw($res$$Register, Assembler::EQ);
8706   %}
8707   ins_pipe(pipe_slow);
8708 %}
8709 
8710 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8711   predicate(needs_acquiring_load_exclusive(n));
8712   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
8713   ins_cost(VOLATILE_REF_COST);
8714   effect(KILL cr);
8715   format %{
8716     "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
8717     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8718   %}
8719   ins_encode %{
8720     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8721                Assembler::halfword, /*acquire*/ true, /*release*/ true,
8722                /*weak*/ true, noreg);
8723     __ csetw($res$$Register, Assembler::EQ);
8724   %}
8725   ins_pipe(pipe_slow);
8726 %}
8727 
8728 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8729   predicate(needs_acquiring_load_exclusive(n));
8730   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
8731   ins_cost(VOLATILE_REF_COST);
8732   effect(KILL cr);
8733   format %{
8734     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
8735     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8736   %}
8737   ins_encode %{
8738     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8739                Assembler::word, /*acquire*/ true, /*release*/ true,
8740                /*weak*/ true, noreg);
8741     __ csetw($res$$Register, Assembler::EQ);
8742   %}
8743   ins_pipe(pipe_slow);
8744 %}
8745 
8746 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8747   predicate(needs_acquiring_load_exclusive(n));
8748   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
8749   ins_cost(VOLATILE_REF_COST);
8750   effect(KILL cr);
8751   format %{
8752     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
8753     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8754   %}
8755   ins_encode %{
8756     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8757                Assembler::xword, /*acquire*/ true, /*release*/ true,
8758                /*weak*/ true, noreg);
8759     __ csetw($res$$Register, Assembler::EQ);
8760   %}
8761   ins_pipe(pipe_slow);
8762 %}
8763 
8764 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8765   predicate(needs_acquiring_load_exclusive(n));
8766   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
8767   ins_cost(VOLATILE_REF_COST);
8768   effect(KILL cr);
8769   format %{
8770     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8771     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8772   %}
8773   ins_encode %{
8774     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8775                Assembler::word, /*acquire*/ true, /*release*/ true,
8776                /*weak*/ true, noreg);
8777     __ csetw($res$$Register, Assembler::EQ);
8778   %}
8779   ins_pipe(pipe_slow);
8780 %}
8781 
8782 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8783   predicate(needs_acquiring_load_exclusive(n));
8784   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
8785   ins_cost(VOLATILE_REF_COST);
8786   effect(KILL cr);
8787   format %{
8788     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
8789     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8790   %}
8791   ins_encode %{
8792     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8793                Assembler::xword, /*acquire*/ true, /*release*/ true,
8794                /*weak*/ true, noreg);
8795     __ csetw($res$$Register, Assembler::EQ);
8796   %}
8797   ins_pipe(pipe_slow);
8798 %}
8799 
8800 // END This section of the file is automatically generated. Do not edit --------------
8801 // ---------------------------------------------------------------------
8802 
// Atomic exchange rules (GetAndSet*): store $newv to [$mem] and return
// the previous contents in $prev. These are the plain (non-acquiring)
// variants; the *Acq rules below are selected when acquire is needed.

// int (32-bit) exchange
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long (64-bit) exchange
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop exchange (word-sized, like int)
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// full-width pointer exchange
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8842 
// Acquiring atomic exchange rules: same as the rules above but the
// predicate selects these when acquire semantics are required, and the
// encoding uses the *al (acquire+release) exchange helpers.

// int (32-bit) exchange, acquiring
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long (64-bit) exchange, acquiring
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow oop exchange, acquiring
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// full-width pointer exchange, acquiring
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8886 
8887 
// Atomic fetch-and-add rules (GetAndAdd{L,I}), non-acquiring.
// Four variants per width: register vs immediate increment, and a
// _no_res form (result unused; noreg destination) which is cheaper.

// long add, register increment, result used
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long add, register increment, result discarded
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long add, immediate increment, result used
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long add, immediate increment, result discarded
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add, register increment, result used
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add, register increment, result discarded
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add, immediate increment, result used
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add, immediate increment, result discarded
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8971 
// Acquiring fetch-and-add rules: mirror the eight rules above, selected
// when needs_acquiring_load_exclusive(n) holds; encodings use the *al
// (acquire+release) add helpers.

// long add acq, register increment, result used
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long add acq, register increment, result discarded
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long add acq, immediate increment, result used
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long add acq, immediate increment, result discarded
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add acq, register increment, result used
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add acq, register increment, result discarded
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add acq, immediate increment, result used
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int add acq, immediate increment, result discarded
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9059 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // cset/cneg sequence: $dst = (NE ? 1 : 0), then negated when LT,
    // yielding -1/0/+1 without any branches.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9082 
// Manifest a CmpL against an add/sub-encodable immediate as -1/0/+1.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // subs only accepts a non-negative immediate, so a negative constant
    // is compared via adds of its negation (immLAddSub guarantees the
    // magnitude is encodable either way).
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9107 
9108 // ============================================================================
9109 // Conditional Move Instructions
9110 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9120 
// Conditional move of an int between two registers; cselw picks $src2
// when the condition holds, else $src1. Signed (cmpOp) flavour first,
// then the unsigned (cmpOpU) twin.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flavour of the rule above
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9152 
9153 // special cases where one arg is zero
9154 
9155 // n.b. this is selected in preference to the rule above because it
9156 // avoids loading constant 0 into a source register
9157 
9158 // TODO
9159 // we ought only to be able to cull one of these variants as the ideal
9160 // transforms ought always to order the zero consistently (to left/right?)
9161 
// int cmove where one arm is the constant zero: use zr directly instead
// of materializing 0 in a register (cheaper, hence preferred by cost).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the left
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// signed flavour, zero on the right
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the right
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9225 
9226 // special case for creating a boolean 0 or 1
9227 
9228 // n.b. this is selected in preference to the rule above because it
9229 // avoids loading constants 0 and 1 into a source register
9230 
// Boolean materialization: CMoveI between constants 1 and 0 becomes a
// single csincw $dst, zr, zr, cond (i.e. cset of the negated condition),
// with no source registers needed at all.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned flavour of the rule above
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9268 
// Conditional move of a long between two registers (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flavour of the rule above
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9300 
9301 // special cases where one arg is zero
9302 
// long cmove variants where one arm is constant zero: use zr directly.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the right
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// signed flavour, zero on the left
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the left
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9366 
// Conditional move of a full-width pointer between two registers.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flavour of the rule above
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9398 
9399 // special cases where one arg is zero
9400 
// pointer cmove variants where one arm is the null constant: use zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the right
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// signed flavour, zero on the left
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the left
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9464 
// Conditional move of a narrow oop (word-sized cselw), signed flavour.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9480 
// Conditional move of a narrow oop (word-sized cselw), unsigned flavour.
// The format annotation is "unsigned" to match every other cmpOpU rule
// in this file (the cmpcode operand is a cmpOpU / rFlagsRegU pair).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9496 
9497 // special cases where one arg is zero
9498 
// narrow-oop cmove variants where one arm is the null constant: use zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the right
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// signed flavour, zero on the left
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour, zero on the left
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9562 
// Conditional move of a float via fcsels (FP conditional select),
// signed-compare flavour. Note the src2/src1 operand order matches the
// integer csel rules above.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9580 
// Conditional move of a float via fcsels, unsigned-compare flavour.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9598 
// Conditional move of a double via fcseld, signed-compare flavour.
// (Annotation corrected: this rule matches CMoveD / vRegD, i.e. double,
// not float as the old format text claimed.)
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9616 
// Conditional move of a double via fcseld, unsigned-compare flavour.
// (Annotation corrected from "float" to "double" to match CMoveD/vRegD.)
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9634 
9635 // ============================================================================
9636 // Arithmetic Instructions
9637 //
9638 
9639 // Integer Addition
9640 
9641 // TODO
9642 // these currently employ operations which do not set CR and hence are
9643 // not flagged as killing CR but we would like to isolate the cases
9644 // where we want to set flags from those where we don't. need to work
9645 // out how to do that.
9646 
// 32-bit integer addition, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer addition, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// as above but with the long operand narrowed via ConvL2I.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9689 
9690 // Pointer Addition
9691 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
9692   match(Set dst (AddP src1 src2));
9693 
9694   ins_cost(INSN_COST);
9695   format %{ "add $dst, $src1, $src2\t# ptr" %}
9696 
9697   ins_encode %{
9698     __ add(as_Register($dst$$reg),
9699            as_Register($src1$$reg),
9700            as_Register($src2$$reg));
9701   %}
9702 
9703   ins_pipe(ialu_reg_reg);
9704 %}
9705 
// Pointer + int offset: the ConvI2L is folded into the add's sxtw
// (sign-extend word) operand extension, saving a separate extend.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9720 
// Pointer + scaled long index: folds the left shift into the address
// computation via lea with an lsl-scaled register operand.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9735 
// Pointer + scaled int index: folds both the ConvI2L and the shift into
// lea with an sxtw-extended, scaled register operand.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9750 
// Sign-extend an int then shift it left: implemented as a single sbfiz
// (signed bit-field insert in zero). The field width is capped at 32
// via MIN since the source value is only 32 bits wide.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9765 
9766 // Pointer Immediate Addition
9767 // n.b. this needs to be more expensive than using an indirect memory
9768 // operand
// Pointer + immediate add (immediate encodable in an add/sub field).
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9782 
9783 // Long Addition
// 64-bit register + register add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9799 
// Long Immediate Addition.
// No constant pool entries required.
// 64-bit register + immediate add (immediate encodable in an add/sub field).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9814 
9815 // Integer Subtraction
// 32-bit register - register subtract.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9830 
9831 // Immediate Subtraction
// 32-bit register - immediate subtract.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9845 
9846 // Long Subtraction
// 64-bit register - register subtract.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9862 
// Long Immediate Subtraction.
// No constant pool entries required.
// 64-bit register - immediate subtract (immediate encodable in an
// add/sub field).
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: was "sub$dst, ..." with the mnemonic fused to
  // the operand, garbling debug/disassembly output. Two spaces match
  // the sibling subL_reg_reg rule.
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9877 
9878 // Integer Negation (special case for sub)
9879 
// 0 - src matched as a 32-bit negate.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9893 
9894 // Long Negation
9895 
// 0 - src matched as a 64-bit negate.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9909 
9910 // Integer Multiply
9911 
// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9926 
// Long product of two sign-extended ints: one smull instead of two
// sign-extends plus a 64-bit mul.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9941 
9942 // Long Multiply
9943 
// 64-bit multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9958 
// High 64 bits of the signed 128-bit product (MulHiL) via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed format string: dropped the stray ", " that preceded the tab
  // ("$src2, \t#"), which printed a dangling comma in debug output.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9974 
9975 // Combined Integer Multiply & Add/Sub
9976 
// src3 + src1*src2 fused into one 32-bit multiply-add.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: said "madd" but the encoding emits the 32-bit
  // form maddw (matching mulI's use of mulw).
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9992 
// src3 - src1*src2 fused into one 32-bit multiply-subtract.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: said "msub" but the encoding emits the 32-bit
  // form msubw.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10008 
10009 // Combined Integer Multiply & Neg
10010 
// (-src1)*src2 or src1*(-src2) fused into one 32-bit multiply-negate.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: said "mneg" but the encoding emits the 32-bit
  // form mnegw.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10026 
10027 // Combined Long Multiply & Add/Sub
10028 
// src3 + src1*src2 fused into one 64-bit multiply-add.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10044 
// src3 - src1*src2 fused into one 64-bit multiply-subtract.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10060 
10061 // Combined Long Multiply & Neg
10062 
// (-src1)*src2 or src1*(-src2) fused into one 64-bit multiply-negate.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10078 
10079 // Integer Divide
10080 
// 32-bit signed divide.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10090 
// (src1 >> 31) >>> 31, i.e. extract the sign bit: both shift counts are
// pinned to 31 by immI_31, so a single lsrw by 31 suffices.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10100 
// src + sign-bit(src): folds the sign-extract of the pattern above into
// one addw with an LSR #31 shifted operand (rounding step for div by 2).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10114 
10115 // Long Divide
10116 
// 64-bit signed divide.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10126 
// 64-bit analogue of signExtract: (src1 >> 63) >>> 63 == lsr by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10136 
// 64-bit analogue of div2Round: src + sign-bit(src) as one add with an
// LSR #63 shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format string: added the "LSR" shift kind so the debug output
  // matches the emitted shifted-register add (parallels div2Round).
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10150 
10151 // Integer Remainder
10152 
// 32-bit signed remainder: sdivw into rscratch1, then multiply-subtract
// back (src1 - quotient*src2).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format string: second line read "msubw($dst, ..., $src1" with
  // a stray '(' fused to the mnemonic and no closing text.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10163 
10164 // Long Remainder
10165 
// 64-bit signed remainder: sdiv into rscratch1, then multiply-subtract
// back (src1 - quotient*src2).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format string: second line read "msub($dst, ..., $src1" with a
  // stray '(' and no closing text; also use "\n\t" like modI so the
  // second line is indented consistently in debug output.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10176 
10177 // Integer Shifts
10178 
10179 // Shift Left Register
// 32-bit shift left by a register amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10194 
10195 // Shift Left Immediate
// 32-bit shift left by an immediate; count is masked to 0..31 to match
// Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10210 
10211 // Shift Right Logical Register
// 32-bit logical shift right by a register amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10226 
10227 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10242 
10243 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register amount (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10258 
10259 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10274 
10275 // Combined Int Mask and Right Shift (using UBFM)
10276 // TODO
10277 
10278 // Long Shifts
10279 
10280 // Shift Left Register
// 64-bit shift left by a register amount (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10295 
10296 // Shift Left Immediate
// 64-bit shift left by an immediate; count masked to 0..63.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10311 
10312 // Shift Right Logical Register
// 64-bit logical shift right by a register amount (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10327 
10328 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10343 
10344 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as an integer
// (CastP2X); used for card table stores per the comment above.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10359 
10360 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register amount (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10375 
10376 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10391 
10392 // BEGIN This section of the file is automatically generated. Do not edit --------------
10393 
// XorL with -1 == bitwise NOT: eon dst, src1, zr. (Generated section.)
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// XorI with -1 == bitwise NOT: eonw dst, src1, zr. (Generated section.)
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10426 
// src1 & ~src2 fused into one bicw. (Generated section.)
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10443 
// src1 & ~src2 fused into one bic. (Generated section.)
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10460 
// src1 | ~src2 fused into one ornw. (Generated section.)
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10477 
// src1 | ~src2 fused into one orn. (Generated section.)
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10494 
// ~(src1 ^ src2) fused into one eonw. (Generated section.)
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10511 
// ~(src1 ^ src2) fused into one eon. (Generated section.)
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10528 
// src1 & ~(src2 >>> src3) fused into bicw with LSR operand. (Generated.)
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10546 
// src1 & ~(src2 >>> src3) fused into bic with LSR operand. (Generated.)
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10564 
// src1 & ~(src2 >> src3) fused into bicw with ASR operand. (Generated.)
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10582 
// src1 & ~(src2 >> src3) fused into bic with ASR operand. (Generated.)
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10600 
// src1 & ~(src2 << src3) fused into bicw with LSL operand. (Generated.)
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10618 
// src1 & ~(src2 << src3) fused into bic with LSL operand. (Generated.)
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10636 
// ~(src1 ^ (src2 >>> src3)) fused into eonw with LSR operand. (Generated.)
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10654 
// ~(src1 ^ (src2 >>> src3)) fused into eon with LSR operand. (Generated.)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10672 
// ~(src1 ^ (src2 >> src3)) fused into eonw with ASR operand. (Generated.)
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10690 
// ~(src1 ^ (src2 >> src3)) fused into eon with ASR operand. (Generated.)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10708 
// ~(src1 ^ (src2 << src3)) fused into eonw with LSL operand. (Generated.)
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10726 
// ~(src1 ^ (src2 << src3)) fused into eon with LSL operand. (Generated.)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10744 
// ---- OR-NOT (ORN) with shifted-register operand ----
// These rules fold (src1 | ((src2 shift src3) ^ src4)), where src4 is the
// all-ones constant (so the inner XOR is a bitwise NOT), into a single ORN
// with a shifted second operand.  32-bit (ornw) forms mask the shift with
// 0x1f, 64-bit (orn) forms with 0x3f.
// NOTE(review): rFlagsReg cr is declared but no effect() is given and ORN
// does not set flags -- presumably redundant; confirm.

// 32-bit: dst = src1 ORN (src2, LSR #src3)
10745 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
10746                          iRegIorL2I src1, iRegIorL2I src2,
10747                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10748   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
10749   ins_cost(1.9 * INSN_COST);
10750   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
10751 
10752   ins_encode %{
10753     __ ornw(as_Register($dst$$reg),
10754               as_Register($src1$$reg),
10755               as_Register($src2$$reg),
10756               Assembler::LSR,
10757               $src3$$constant & 0x1f);
10758   %}
10759 
10760   ins_pipe(ialu_reg_reg_shift);
10761 %}
10762 
// 64-bit: dst = src1 ORN (src2, LSR #src3)
10763 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
10764                          iRegL src1, iRegL src2,
10765                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10766   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
10767   ins_cost(1.9 * INSN_COST);
10768   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
10769 
10770   ins_encode %{
10771     __ orn(as_Register($dst$$reg),
10772               as_Register($src1$$reg),
10773               as_Register($src2$$reg),
10774               Assembler::LSR,
10775               $src3$$constant & 0x3f);
10776   %}
10777 
10778   ins_pipe(ialu_reg_reg_shift);
10779 %}
10780 
// 32-bit: dst = src1 ORN (src2, ASR #src3)
10781 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10782                          iRegIorL2I src1, iRegIorL2I src2,
10783                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10784   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10785   ins_cost(1.9 * INSN_COST);
10786   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10787 
10788   ins_encode %{
10789     __ ornw(as_Register($dst$$reg),
10790               as_Register($src1$$reg),
10791               as_Register($src2$$reg),
10792               Assembler::ASR,
10793               $src3$$constant & 0x1f);
10794   %}
10795 
10796   ins_pipe(ialu_reg_reg_shift);
10797 %}
10798 
// 64-bit: dst = src1 ORN (src2, ASR #src3)
10799 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10800                          iRegL src1, iRegL src2,
10801                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10802   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10803   ins_cost(1.9 * INSN_COST);
10804   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10805 
10806   ins_encode %{
10807     __ orn(as_Register($dst$$reg),
10808               as_Register($src1$$reg),
10809               as_Register($src2$$reg),
10810               Assembler::ASR,
10811               $src3$$constant & 0x3f);
10812   %}
10813 
10814   ins_pipe(ialu_reg_reg_shift);
10815 %}
10816 
// 32-bit: dst = src1 ORN (src2, LSL #src3)
10817 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10818                          iRegIorL2I src1, iRegIorL2I src2,
10819                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10820   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10821   ins_cost(1.9 * INSN_COST);
10822   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10823 
10824   ins_encode %{
10825     __ ornw(as_Register($dst$$reg),
10826               as_Register($src1$$reg),
10827               as_Register($src2$$reg),
10828               Assembler::LSL,
10829               $src3$$constant & 0x1f);
10830   %}
10831 
10832   ins_pipe(ialu_reg_reg_shift);
10833 %}
10834 
// 64-bit: dst = src1 ORN (src2, LSL #src3)
10835 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10836                          iRegL src1, iRegL src2,
10837                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10838   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10839   ins_cost(1.9 * INSN_COST);
10840   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10841 
10842   ins_encode %{
10843     __ orn(as_Register($dst$$reg),
10844               as_Register($src1$$reg),
10845               as_Register($src2$$reg),
10846               Assembler::LSL,
10847               $src3$$constant & 0x3f);
10848   %}
10849 
10850   ins_pipe(ialu_reg_reg_shift);
10851 %}
10852 
// ---- AND with shifted-register operand ----
// Fold a constant shift of src2 into the AND's shifted-operand form:
// dst = src1 AND (src2 <shift> #src3).  32-bit (andw) forms mask the shift
// amount with 0x1f, 64-bit (andr) forms with 0x3f.
// NOTE(review): rFlagsReg cr is listed but no effect() is declared and AND
// (non-flag-setting form) does not set flags -- presumably redundant.

// 32-bit: dst = src1 AND (src2, LSR #src3)
10853 instruct AndI_reg_URShift_reg(iRegINoSp dst,
10854                          iRegIorL2I src1, iRegIorL2I src2,
10855                          immI src3, rFlagsReg cr) %{
10856   match(Set dst (AndI src1 (URShiftI src2 src3)));
10857 
10858   ins_cost(1.9 * INSN_COST);
10859   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
10860 
10861   ins_encode %{
10862     __ andw(as_Register($dst$$reg),
10863               as_Register($src1$$reg),
10864               as_Register($src2$$reg),
10865               Assembler::LSR,
10866               $src3$$constant & 0x1f);
10867   %}
10868 
10869   ins_pipe(ialu_reg_reg_shift);
10870 %}
10871 
// 64-bit: dst = src1 AND (src2, LSR #src3)
10872 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
10873                          iRegL src1, iRegL src2,
10874                          immI src3, rFlagsReg cr) %{
10875   match(Set dst (AndL src1 (URShiftL src2 src3)));
10876 
10877   ins_cost(1.9 * INSN_COST);
10878   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
10879 
10880   ins_encode %{
10881     __ andr(as_Register($dst$$reg),
10882               as_Register($src1$$reg),
10883               as_Register($src2$$reg),
10884               Assembler::LSR,
10885               $src3$$constant & 0x3f);
10886   %}
10887 
10888   ins_pipe(ialu_reg_reg_shift);
10889 %}
10890 
// 32-bit: dst = src1 AND (src2, ASR #src3)
10891 instruct AndI_reg_RShift_reg(iRegINoSp dst,
10892                          iRegIorL2I src1, iRegIorL2I src2,
10893                          immI src3, rFlagsReg cr) %{
10894   match(Set dst (AndI src1 (RShiftI src2 src3)));
10895 
10896   ins_cost(1.9 * INSN_COST);
10897   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
10898 
10899   ins_encode %{
10900     __ andw(as_Register($dst$$reg),
10901               as_Register($src1$$reg),
10902               as_Register($src2$$reg),
10903               Assembler::ASR,
10904               $src3$$constant & 0x1f);
10905   %}
10906 
10907   ins_pipe(ialu_reg_reg_shift);
10908 %}
10909 
// 64-bit: dst = src1 AND (src2, ASR #src3)
10910 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
10911                          iRegL src1, iRegL src2,
10912                          immI src3, rFlagsReg cr) %{
10913   match(Set dst (AndL src1 (RShiftL src2 src3)));
10914 
10915   ins_cost(1.9 * INSN_COST);
10916   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
10917 
10918   ins_encode %{
10919     __ andr(as_Register($dst$$reg),
10920               as_Register($src1$$reg),
10921               as_Register($src2$$reg),
10922               Assembler::ASR,
10923               $src3$$constant & 0x3f);
10924   %}
10925 
10926   ins_pipe(ialu_reg_reg_shift);
10927 %}
10928 
// 32-bit: dst = src1 AND (src2, LSL #src3)
10929 instruct AndI_reg_LShift_reg(iRegINoSp dst,
10930                          iRegIorL2I src1, iRegIorL2I src2,
10931                          immI src3, rFlagsReg cr) %{
10932   match(Set dst (AndI src1 (LShiftI src2 src3)));
10933 
10934   ins_cost(1.9 * INSN_COST);
10935   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
10936 
10937   ins_encode %{
10938     __ andw(as_Register($dst$$reg),
10939               as_Register($src1$$reg),
10940               as_Register($src2$$reg),
10941               Assembler::LSL,
10942               $src3$$constant & 0x1f);
10943   %}
10944 
10945   ins_pipe(ialu_reg_reg_shift);
10946 %}
10947 
// 64-bit: dst = src1 AND (src2, LSL #src3)
10948 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
10949                          iRegL src1, iRegL src2,
10950                          immI src3, rFlagsReg cr) %{
10951   match(Set dst (AndL src1 (LShiftL src2 src3)));
10952 
10953   ins_cost(1.9 * INSN_COST);
10954   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
10955 
10956   ins_encode %{
10957     __ andr(as_Register($dst$$reg),
10958               as_Register($src1$$reg),
10959               as_Register($src2$$reg),
10960               Assembler::LSL,
10961               $src3$$constant & 0x3f);
10962   %}
10963 
10964   ins_pipe(ialu_reg_reg_shift);
10965 %}
10966 
// ---- EOR with shifted-register operand ----
// Fold a constant shift of src2 into the EOR's shifted-operand form:
// dst = src1 EOR (src2 <shift> #src3).  32-bit (eorw) forms mask the shift
// amount with 0x1f, 64-bit (eor) forms with 0x3f.
// NOTE(review): rFlagsReg cr is listed but no effect() is declared and EOR
// does not set flags -- presumably redundant.

// 32-bit: dst = src1 EOR (src2, LSR #src3)
10967 instruct XorI_reg_URShift_reg(iRegINoSp dst,
10968                          iRegIorL2I src1, iRegIorL2I src2,
10969                          immI src3, rFlagsReg cr) %{
10970   match(Set dst (XorI src1 (URShiftI src2 src3)));
10971 
10972   ins_cost(1.9 * INSN_COST);
10973   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
10974 
10975   ins_encode %{
10976     __ eorw(as_Register($dst$$reg),
10977               as_Register($src1$$reg),
10978               as_Register($src2$$reg),
10979               Assembler::LSR,
10980               $src3$$constant & 0x1f);
10981   %}
10982 
10983   ins_pipe(ialu_reg_reg_shift);
10984 %}
10985 
// 64-bit: dst = src1 EOR (src2, LSR #src3)
10986 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
10987                          iRegL src1, iRegL src2,
10988                          immI src3, rFlagsReg cr) %{
10989   match(Set dst (XorL src1 (URShiftL src2 src3)));
10990 
10991   ins_cost(1.9 * INSN_COST);
10992   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
10993 
10994   ins_encode %{
10995     __ eor(as_Register($dst$$reg),
10996               as_Register($src1$$reg),
10997               as_Register($src2$$reg),
10998               Assembler::LSR,
10999               $src3$$constant & 0x3f);
11000   %}
11001 
11002   ins_pipe(ialu_reg_reg_shift);
11003 %}
11004 
// 32-bit: dst = src1 EOR (src2, ASR #src3)
11005 instruct XorI_reg_RShift_reg(iRegINoSp dst,
11006                          iRegIorL2I src1, iRegIorL2I src2,
11007                          immI src3, rFlagsReg cr) %{
11008   match(Set dst (XorI src1 (RShiftI src2 src3)));
11009 
11010   ins_cost(1.9 * INSN_COST);
11011   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
11012 
11013   ins_encode %{
11014     __ eorw(as_Register($dst$$reg),
11015               as_Register($src1$$reg),
11016               as_Register($src2$$reg),
11017               Assembler::ASR,
11018               $src3$$constant & 0x1f);
11019   %}
11020 
11021   ins_pipe(ialu_reg_reg_shift);
11022 %}
11023 
// 64-bit: dst = src1 EOR (src2, ASR #src3)
11024 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
11025                          iRegL src1, iRegL src2,
11026                          immI src3, rFlagsReg cr) %{
11027   match(Set dst (XorL src1 (RShiftL src2 src3)));
11028 
11029   ins_cost(1.9 * INSN_COST);
11030   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
11031 
11032   ins_encode %{
11033     __ eor(as_Register($dst$$reg),
11034               as_Register($src1$$reg),
11035               as_Register($src2$$reg),
11036               Assembler::ASR,
11037               $src3$$constant & 0x3f);
11038   %}
11039 
11040   ins_pipe(ialu_reg_reg_shift);
11041 %}
11042 
// 32-bit: dst = src1 EOR (src2, LSL #src3)
11043 instruct XorI_reg_LShift_reg(iRegINoSp dst,
11044                          iRegIorL2I src1, iRegIorL2I src2,
11045                          immI src3, rFlagsReg cr) %{
11046   match(Set dst (XorI src1 (LShiftI src2 src3)));
11047 
11048   ins_cost(1.9 * INSN_COST);
11049   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
11050 
11051   ins_encode %{
11052     __ eorw(as_Register($dst$$reg),
11053               as_Register($src1$$reg),
11054               as_Register($src2$$reg),
11055               Assembler::LSL,
11056               $src3$$constant & 0x1f);
11057   %}
11058 
11059   ins_pipe(ialu_reg_reg_shift);
11060 %}
11061 
// 64-bit: dst = src1 EOR (src2, LSL #src3)
11062 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11063                          iRegL src1, iRegL src2,
11064                          immI src3, rFlagsReg cr) %{
11065   match(Set dst (XorL src1 (LShiftL src2 src3)));
11066 
11067   ins_cost(1.9 * INSN_COST);
11068   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11069 
11070   ins_encode %{
11071     __ eor(as_Register($dst$$reg),
11072               as_Register($src1$$reg),
11073               as_Register($src2$$reg),
11074               Assembler::LSL,
11075               $src3$$constant & 0x3f);
11076   %}
11077 
11078   ins_pipe(ialu_reg_reg_shift);
11079 %}
11080 
// ---- ORR with shifted-register operand ----
// Fold a constant shift of src2 into the ORR's shifted-operand form:
// dst = src1 ORR (src2 <shift> #src3).  32-bit (orrw) forms mask the shift
// amount with 0x1f, 64-bit (orr) forms with 0x3f.
// NOTE(review): rFlagsReg cr is listed but no effect() is declared and ORR
// does not set flags -- presumably redundant.

// 32-bit: dst = src1 ORR (src2, LSR #src3)
11081 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11082                          iRegIorL2I src1, iRegIorL2I src2,
11083                          immI src3, rFlagsReg cr) %{
11084   match(Set dst (OrI src1 (URShiftI src2 src3)));
11085 
11086   ins_cost(1.9 * INSN_COST);
11087   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11088 
11089   ins_encode %{
11090     __ orrw(as_Register($dst$$reg),
11091               as_Register($src1$$reg),
11092               as_Register($src2$$reg),
11093               Assembler::LSR,
11094               $src3$$constant & 0x1f);
11095   %}
11096 
11097   ins_pipe(ialu_reg_reg_shift);
11098 %}
11099 
// 64-bit: dst = src1 ORR (src2, LSR #src3)
11100 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11101                          iRegL src1, iRegL src2,
11102                          immI src3, rFlagsReg cr) %{
11103   match(Set dst (OrL src1 (URShiftL src2 src3)));
11104 
11105   ins_cost(1.9 * INSN_COST);
11106   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11107 
11108   ins_encode %{
11109     __ orr(as_Register($dst$$reg),
11110               as_Register($src1$$reg),
11111               as_Register($src2$$reg),
11112               Assembler::LSR,
11113               $src3$$constant & 0x3f);
11114   %}
11115 
11116   ins_pipe(ialu_reg_reg_shift);
11117 %}
11118 
// 32-bit: dst = src1 ORR (src2, ASR #src3)
11119 instruct OrI_reg_RShift_reg(iRegINoSp dst,
11120                          iRegIorL2I src1, iRegIorL2I src2,
11121                          immI src3, rFlagsReg cr) %{
11122   match(Set dst (OrI src1 (RShiftI src2 src3)));
11123 
11124   ins_cost(1.9 * INSN_COST);
11125   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
11126 
11127   ins_encode %{
11128     __ orrw(as_Register($dst$$reg),
11129               as_Register($src1$$reg),
11130               as_Register($src2$$reg),
11131               Assembler::ASR,
11132               $src3$$constant & 0x1f);
11133   %}
11134 
11135   ins_pipe(ialu_reg_reg_shift);
11136 %}
11137 
// 64-bit: dst = src1 ORR (src2, ASR #src3)
11138 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11139                          iRegL src1, iRegL src2,
11140                          immI src3, rFlagsReg cr) %{
11141   match(Set dst (OrL src1 (RShiftL src2 src3)));
11142 
11143   ins_cost(1.9 * INSN_COST);
11144   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11145 
11146   ins_encode %{
11147     __ orr(as_Register($dst$$reg),
11148               as_Register($src1$$reg),
11149               as_Register($src2$$reg),
11150               Assembler::ASR,
11151               $src3$$constant & 0x3f);
11152   %}
11153 
11154   ins_pipe(ialu_reg_reg_shift);
11155 %}
11156 
// 32-bit: dst = src1 ORR (src2, LSL #src3)
11157 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11158                          iRegIorL2I src1, iRegIorL2I src2,
11159                          immI src3, rFlagsReg cr) %{
11160   match(Set dst (OrI src1 (LShiftI src2 src3)));
11161 
11162   ins_cost(1.9 * INSN_COST);
11163   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11164 
11165   ins_encode %{
11166     __ orrw(as_Register($dst$$reg),
11167               as_Register($src1$$reg),
11168               as_Register($src2$$reg),
11169               Assembler::LSL,
11170               $src3$$constant & 0x1f);
11171   %}
11172 
11173   ins_pipe(ialu_reg_reg_shift);
11174 %}
11175 
// 64-bit: dst = src1 ORR (src2, LSL #src3)
11176 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11177                          iRegL src1, iRegL src2,
11178                          immI src3, rFlagsReg cr) %{
11179   match(Set dst (OrL src1 (LShiftL src2 src3)));
11180 
11181   ins_cost(1.9 * INSN_COST);
11182   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11183 
11184   ins_encode %{
11185     __ orr(as_Register($dst$$reg),
11186               as_Register($src1$$reg),
11187               as_Register($src2$$reg),
11188               Assembler::LSL,
11189               $src3$$constant & 0x3f);
11190   %}
11191 
11192   ins_pipe(ialu_reg_reg_shift);
11193 %}
11194 
// ---- ADD with shifted-register operand ----
// Fold a constant shift of src2 into the ADD's shifted-operand form:
// dst = src1 + (src2 <shift> #src3).  32-bit (addw) forms mask the shift
// amount with 0x1f, 64-bit (add) forms with 0x3f.
// NOTE(review): rFlagsReg cr is listed but no effect() is declared and the
// non-flag-setting ADD does not set flags -- presumably redundant.

// 32-bit: dst = src1 + (src2, LSR #src3)
11195 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11196                          iRegIorL2I src1, iRegIorL2I src2,
11197                          immI src3, rFlagsReg cr) %{
11198   match(Set dst (AddI src1 (URShiftI src2 src3)));
11199 
11200   ins_cost(1.9 * INSN_COST);
11201   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11202 
11203   ins_encode %{
11204     __ addw(as_Register($dst$$reg),
11205               as_Register($src1$$reg),
11206               as_Register($src2$$reg),
11207               Assembler::LSR,
11208               $src3$$constant & 0x1f);
11209   %}
11210 
11211   ins_pipe(ialu_reg_reg_shift);
11212 %}
11213 
// 64-bit: dst = src1 + (src2, LSR #src3)
11214 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11215                          iRegL src1, iRegL src2,
11216                          immI src3, rFlagsReg cr) %{
11217   match(Set dst (AddL src1 (URShiftL src2 src3)));
11218 
11219   ins_cost(1.9 * INSN_COST);
11220   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11221 
11222   ins_encode %{
11223     __ add(as_Register($dst$$reg),
11224               as_Register($src1$$reg),
11225               as_Register($src2$$reg),
11226               Assembler::LSR,
11227               $src3$$constant & 0x3f);
11228   %}
11229 
11230   ins_pipe(ialu_reg_reg_shift);
11231 %}
11232 
// 32-bit: dst = src1 + (src2, ASR #src3)
11233 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11234                          iRegIorL2I src1, iRegIorL2I src2,
11235                          immI src3, rFlagsReg cr) %{
11236   match(Set dst (AddI src1 (RShiftI src2 src3)));
11237 
11238   ins_cost(1.9 * INSN_COST);
11239   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11240 
11241   ins_encode %{
11242     __ addw(as_Register($dst$$reg),
11243               as_Register($src1$$reg),
11244               as_Register($src2$$reg),
11245               Assembler::ASR,
11246               $src3$$constant & 0x1f);
11247   %}
11248 
11249   ins_pipe(ialu_reg_reg_shift);
11250 %}
11251 
// 64-bit: dst = src1 + (src2, ASR #src3)
11252 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
11253                          iRegL src1, iRegL src2,
11254                          immI src3, rFlagsReg cr) %{
11255   match(Set dst (AddL src1 (RShiftL src2 src3)));
11256 
11257   ins_cost(1.9 * INSN_COST);
11258   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
11259 
11260   ins_encode %{
11261     __ add(as_Register($dst$$reg),
11262               as_Register($src1$$reg),
11263               as_Register($src2$$reg),
11264               Assembler::ASR,
11265               $src3$$constant & 0x3f);
11266   %}
11267 
11268   ins_pipe(ialu_reg_reg_shift);
11269 %}
11270 
// 32-bit: dst = src1 + (src2, LSL #src3)
11271 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11272                          iRegIorL2I src1, iRegIorL2I src2,
11273                          immI src3, rFlagsReg cr) %{
11274   match(Set dst (AddI src1 (LShiftI src2 src3)));
11275 
11276   ins_cost(1.9 * INSN_COST);
11277   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11278 
11279   ins_encode %{
11280     __ addw(as_Register($dst$$reg),
11281               as_Register($src1$$reg),
11282               as_Register($src2$$reg),
11283               Assembler::LSL,
11284               $src3$$constant & 0x1f);
11285   %}
11286 
11287   ins_pipe(ialu_reg_reg_shift);
11288 %}
11289 
// 64-bit: dst = src1 + (src2, LSL #src3)
11290 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11291                          iRegL src1, iRegL src2,
11292                          immI src3, rFlagsReg cr) %{
11293   match(Set dst (AddL src1 (LShiftL src2 src3)));
11294 
11295   ins_cost(1.9 * INSN_COST);
11296   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11297 
11298   ins_encode %{
11299     __ add(as_Register($dst$$reg),
11300               as_Register($src1$$reg),
11301               as_Register($src2$$reg),
11302               Assembler::LSL,
11303               $src3$$constant & 0x3f);
11304   %}
11305 
11306   ins_pipe(ialu_reg_reg_shift);
11307 %}
11308 
// ---- SUB with shifted-register operand ----
// Fold a constant shift of src2 into the SUB's shifted-operand form:
// dst = src1 - (src2 <shift> #src3).  32-bit (subw) forms mask the shift
// amount with 0x1f, 64-bit (sub) forms with 0x3f.
// NOTE(review): rFlagsReg cr is listed but no effect() is declared and the
// non-flag-setting SUB does not set flags -- presumably redundant.

// 32-bit: dst = src1 - (src2, LSR #src3)
11309 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11310                          iRegIorL2I src1, iRegIorL2I src2,
11311                          immI src3, rFlagsReg cr) %{
11312   match(Set dst (SubI src1 (URShiftI src2 src3)));
11313 
11314   ins_cost(1.9 * INSN_COST);
11315   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11316 
11317   ins_encode %{
11318     __ subw(as_Register($dst$$reg),
11319               as_Register($src1$$reg),
11320               as_Register($src2$$reg),
11321               Assembler::LSR,
11322               $src3$$constant & 0x1f);
11323   %}
11324 
11325   ins_pipe(ialu_reg_reg_shift);
11326 %}
11327 
// 64-bit: dst = src1 - (src2, LSR #src3)
11328 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11329                          iRegL src1, iRegL src2,
11330                          immI src3, rFlagsReg cr) %{
11331   match(Set dst (SubL src1 (URShiftL src2 src3)));
11332 
11333   ins_cost(1.9 * INSN_COST);
11334   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11335 
11336   ins_encode %{
11337     __ sub(as_Register($dst$$reg),
11338               as_Register($src1$$reg),
11339               as_Register($src2$$reg),
11340               Assembler::LSR,
11341               $src3$$constant & 0x3f);
11342   %}
11343 
11344   ins_pipe(ialu_reg_reg_shift);
11345 %}
11346 
// 32-bit: dst = src1 - (src2, ASR #src3)
11347 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11348                          iRegIorL2I src1, iRegIorL2I src2,
11349                          immI src3, rFlagsReg cr) %{
11350   match(Set dst (SubI src1 (RShiftI src2 src3)));
11351 
11352   ins_cost(1.9 * INSN_COST);
11353   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11354 
11355   ins_encode %{
11356     __ subw(as_Register($dst$$reg),
11357               as_Register($src1$$reg),
11358               as_Register($src2$$reg),
11359               Assembler::ASR,
11360               $src3$$constant & 0x1f);
11361   %}
11362 
11363   ins_pipe(ialu_reg_reg_shift);
11364 %}
11365 
// 64-bit: dst = src1 - (src2, ASR #src3)
11366 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11367                          iRegL src1, iRegL src2,
11368                          immI src3, rFlagsReg cr) %{
11369   match(Set dst (SubL src1 (RShiftL src2 src3)));
11370 
11371   ins_cost(1.9 * INSN_COST);
11372   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11373 
11374   ins_encode %{
11375     __ sub(as_Register($dst$$reg),
11376               as_Register($src1$$reg),
11377               as_Register($src2$$reg),
11378               Assembler::ASR,
11379               $src3$$constant & 0x3f);
11380   %}
11381 
11382   ins_pipe(ialu_reg_reg_shift);
11383 %}
11384 
// 32-bit: dst = src1 - (src2, LSL #src3)
11385 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11386                          iRegIorL2I src1, iRegIorL2I src2,
11387                          immI src3, rFlagsReg cr) %{
11388   match(Set dst (SubI src1 (LShiftI src2 src3)));
11389 
11390   ins_cost(1.9 * INSN_COST);
11391   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11392 
11393   ins_encode %{
11394     __ subw(as_Register($dst$$reg),
11395               as_Register($src1$$reg),
11396               as_Register($src2$$reg),
11397               Assembler::LSL,
11398               $src3$$constant & 0x1f);
11399   %}
11400 
11401   ins_pipe(ialu_reg_reg_shift);
11402 %}
11403 
// 64-bit: dst = src1 - (src2, LSL #src3)
11404 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11405                          iRegL src1, iRegL src2,
11406                          immI src3, rFlagsReg cr) %{
11407   match(Set dst (SubL src1 (LShiftL src2 src3)));
11408 
11409   ins_cost(1.9 * INSN_COST);
11410   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11411 
11412   ins_encode %{
11413     __ sub(as_Register($dst$$reg),
11414               as_Register($src1$$reg),
11415               as_Register($src2$$reg),
11416               Assembler::LSL,
11417               $src3$$constant & 0x3f);
11418   %}
11419 
11420   ins_pipe(ialu_reg_reg_shift);
11421 %}
11422 
11423 
11424 
// ---- Shift-left-then-shift-right ==> bitfield move ----
// A left shift followed by a right shift of the same value is the compiler's
// idiom for sign/zero extension (i2b, i2s, etc.).  SBFM/UBFM express this as
// a single bitfield move: for (x << lshift) >> rshift on an N-bit register,
// immr = (rshift - lshift) & (N-1) and imms = (N-1) - lshift.  Predicates
// ensure both shift counts fit the register width so the encodings are valid.

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit, arithmetic (sign-extending) variant.
11427 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11428 %{
11429   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
11430   // Make sure we are not going to exceed what sbfm can do.
11431   predicate((unsigned int)n->in(2)->get_int() <= 63
11432             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11433 
11434   ins_cost(INSN_COST * 2);
11435   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11436   ins_encode %{
11437     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11438     int s = 63 - lshift;
11439     int r = (rshift - lshift) & 63;
11440     __ sbfm(as_Register($dst$$reg),
11441             as_Register($src$$reg),
11442             r, s);
11443   %}
11444 
11445   ins_pipe(ialu_reg_shift);
11446 %}
11447 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit, arithmetic (sign-extending) variant.
11450 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11451 %{
11452   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11453   // Make sure we are not going to exceed what sbfmw can do.
11454   predicate((unsigned int)n->in(2)->get_int() <= 31
11455             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11456 
11457   ins_cost(INSN_COST * 2);
11458   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11459   ins_encode %{
11460     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11461     int s = 31 - lshift;
11462     int r = (rshift - lshift) & 31;
11463     __ sbfmw(as_Register($dst$$reg),
11464             as_Register($src$$reg),
11465             r, s);
11466   %}
11467 
11468   ins_pipe(ialu_reg_shift);
11469 %}
11470 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit, logical (zero-extending) variant.
11473 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11474 %{
11475   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11476   // Make sure we are not going to exceed what ubfm can do.
11477   predicate((unsigned int)n->in(2)->get_int() <= 63
11478             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11479 
11480   ins_cost(INSN_COST * 2);
11481   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11482   ins_encode %{
11483     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11484     int s = 63 - lshift;
11485     int r = (rshift - lshift) & 63;
11486     __ ubfm(as_Register($dst$$reg),
11487             as_Register($src$$reg),
11488             r, s);
11489   %}
11490 
11491   ins_pipe(ialu_reg_shift);
11492 %}
11493 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit, logical (zero-extending) variant.
11496 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11497 %{
11498   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11499   // Make sure we are not going to exceed what ubfmw can do.
11500   predicate((unsigned int)n->in(2)->get_int() <= 31
11501             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11502 
11503   ins_cost(INSN_COST * 2);
11504   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11505   ins_encode %{
11506     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11507     int s = 31 - lshift;
11508     int r = (rshift - lshift) & 31;
11509     __ ubfmw(as_Register($dst$$reg),
11510             as_Register($src$$reg),
11511             r, s);
11512   %}
11513 
11514   ins_pipe(ialu_reg_shift);
11515 %}
// Bitfield extract with shift & mask
// (x >>> rshift) & mask, where mask is a contiguous run of low-order ones
// (2^k - 1), is a single UBFX extracting a k-bit field starting at rshift.
// The field width is recovered as exact_log2(mask+1).

// 32-bit variant.
11518 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11519 %{
11520   match(Set dst (AndI (URShiftI src rshift) mask));
11521 
11522   ins_cost(INSN_COST);
11523   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
11524   ins_encode %{
11525     int rshift = $rshift$$constant;
11526     long mask = $mask$$constant;
11527     int width = exact_log2(mask+1);
11528     __ ubfxw(as_Register($dst$$reg),
11529             as_Register($src$$reg), rshift, width);
11530   %}
11531   ins_pipe(ialu_reg_shift);
11532 %}
// 64-bit variant.
// NOTE(review): mask is a 64-bit value from immL_bitmask but the width is
// computed with exact_log2 rather than exact_log2_long (cf. the ubfizL
// predicate below, which uses exact_log2_long) -- confirm exact_log2
// handles the full 64-bit range on this platform.
11533 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
11534 %{
11535   match(Set dst (AndL (URShiftL src rshift) mask));
11536 
11537   ins_cost(INSN_COST);
11538   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11539   ins_encode %{
11540     int rshift = $rshift$$constant;
11541     long mask = $mask$$constant;
11542     int width = exact_log2(mask+1);
11543     __ ubfx(as_Register($dst$$reg),
11544             as_Register($src$$reg), rshift, width);
11545   %}
11546   ins_pipe(ialu_reg_shift);
11547 %}
11548 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// 32-bit extract whose (non-negative) result is then widened to long:
// the zero-extension is free, so the ConvI2L folds into the same UBFX.
11551 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11552 %{
11553   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11554 
11555   ins_cost(INSN_COST * 2);
11556   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11557   ins_encode %{
11558     int rshift = $rshift$$constant;
11559     long mask = $mask$$constant;
11560     int width = exact_log2(mask+1);
11561     __ ubfx(as_Register($dst$$reg),
11562             as_Register($src$$reg), rshift, width);
11563   %}
11564   ins_pipe(ialu_reg_shift);
11565 %}
11566 
// ---- Mask-then-shift-left ==> UBFIZ ----
// (x & mask) << lshift, with mask a contiguous run of low-order ones,
// inserts a zero-extended bitfield at position lshift.  The predicates
// require lshift + field width to fit the destination register so the
// UBFIZ encoding is valid.

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
11569 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11570 %{
11571   match(Set dst (LShiftI (AndI src mask) lshift));
11572   predicate((unsigned int)n->in(2)->get_int() <= 31 &&
11573     (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));
11574 
11575   ins_cost(INSN_COST);
11576   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
11577   ins_encode %{
11578     int lshift = $lshift$$constant;
11579     long mask = $mask$$constant;
11580     int width = exact_log2(mask+1);
11581     __ ubfizw(as_Register($dst$$reg),
11582           as_Register($src$$reg), lshift, width);
11583   %}
11584   ins_pipe(ialu_reg_shift);
11585 %}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// NOTE(review): the predicate uses exact_log2_long on the 64-bit mask but the
// encode block uses exact_log2 -- confirm exact_log2 handles the full 64-bit
// range on this platform.
11588 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
11589 %{
11590   match(Set dst (LShiftL (AndL src mask) lshift));
11591   predicate((unsigned int)n->in(2)->get_int() <= 63 &&
11592     (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));
11593 
11594   ins_cost(INSN_COST);
11595   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11596   ins_encode %{
11597     int lshift = $lshift$$constant;
11598     long mask = $mask$$constant;
11599     int width = exact_log2(mask+1);
11600     __ ubfiz(as_Register($dst$$reg),
11601           as_Register($src$$reg), lshift, width);
11602   %}
11603   ins_pipe(ialu_reg_shift);
11604 %}
11605 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (the masked int is non-negative, so the ConvI2L zero-extension is absorbed
// by the same UBFIZ).
11607 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11608 %{
11609   match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
11610   predicate((unsigned int)n->in(2)->get_int() <= 31 &&
11611     (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
11612 
11613   ins_cost(INSN_COST);
11614   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11615   ins_encode %{
11616     int lshift = $lshift$$constant;
11617     long mask = $mask$$constant;
11618     int width = exact_log2(mask+1);
11619     __ ubfiz(as_Register($dst$$reg),
11620              as_Register($src$$reg), lshift, width);
11621   %}
11622   ins_pipe(ialu_reg_shift);
11623 %}
11624 
// Rotations
// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == register
// width is a double-register extract: EXTR takes bits from the concatenation
// src1:src2.  When src1 == src2 this is a rotate.  The predicate checks the
// shift counts sum to 0 mod 64 (mod 32 for the w-form below).

// 64-bit rotation/extract via OR of complementary shifts.
11627 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11628 %{
11629   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11630   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11631 
11632   ins_cost(INSN_COST);
11633   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11634 
11635   ins_encode %{
11636     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11637             $rshift$$constant & 63);
11638   %}
11639   ins_pipe(ialu_reg_reg_extr);
11640 %}
11641 
11642 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11643 %{
11644   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11645   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11646 
11647   ins_cost(INSN_COST);
11648   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11649 
11650   ins_encode %{
11651     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11652             $rshift$$constant & 31);
11653   %}
11654   ins_pipe(ialu_reg_reg_extr);
11655 %}
11656 
11657 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11658 %{
11659   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11660   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11661 
11662   ins_cost(INSN_COST);
11663   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11664 
11665   ins_encode %{
11666     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11667             $rshift$$constant & 63);
11668   %}
11669   ins_pipe(ialu_reg_reg_extr);
11670 %}
11671 
11672 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11673 %{
11674   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11675   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11676 
11677   ins_cost(INSN_COST);
11678   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11679 
11680   ins_encode %{
11681     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11682             $rshift$$constant & 31);
11683   %}
11684   ins_pipe(ialu_reg_reg_extr);
11685 %}
11686 
11687 
11688 // rol expander
11689 
      // rolL_rReg: variable rotate-left of a long.  AArch64 has no rotate-left
      // instruction, so negate the count and rotate right instead (the rotate
      // amount is taken modulo 64 by RORV).  Uses rscratch1 as a temporary.
11690 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11691 %{
11692   effect(DEF dst, USE src, USE shift);
11693 
11694   format %{ "rol    $dst, $src, $shift" %}
11695   ins_cost(INSN_COST * 3);
11696   ins_encode %{
11697     __ subw(rscratch1, zr, as_Register($shift$$reg));
11698     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11699             rscratch1);
11700     %}
11701   ins_pipe(ialu_reg_reg_vshift);
11702 %}
11703 
11704 // rol expander
11705 
      // rolI_rReg: 32-bit variant of rolL_rReg (RORVW takes the count modulo 32).
11706 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11707 %{
11708   effect(DEF dst, USE src, USE shift);
11709 
11710   format %{ "rol    $dst, $src, $shift" %}
11711   ins_cost(INSN_COST * 3);
11712   ins_encode %{
11713     __ subw(rscratch1, zr, as_Register($shift$$reg));
11714     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11715             rscratch1);
11716     %}
11717   ins_pipe(ialu_reg_reg_vshift);
11718 %}
11719 
      // Canonical rotate-left shape (x << s) | (x >>> (64 - s)) -> rol expander.
11720 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11721 %{
11722   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
11723 
11724   expand %{
11725     rolL_rReg(dst, src, shift, cr);
11726   %}
11727 %}
11728 
      // Same shape written with (0 - s); since shifts use the count modulo 64,
      // (0 - s) and (64 - s) rotate identically.
11729 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11730 %{
11731   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
11732 
11733   expand %{
11734     rolL_rReg(dst, src, shift, cr);
11735   %}
11736 %}
11737 
      // 32-bit rotate-left shape (x << s) | (x >>> (32 - s)).
11738 instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11739 %{
11740   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
11741 
11742   expand %{
11743     rolI_rReg(dst, src, shift, cr);
11744   %}
11745 %}
11746 
      // 32-bit rotate-left shape with (0 - s) as the complementary count.
11747 instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11748 %{
11749   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
11750 
11751   expand %{
11752     rolI_rReg(dst, src, shift, cr);
11753   %}
11754 %}
11755 
11756 // ror expander
11757 
      // rorL_rReg: variable rotate-right of a long; maps directly to RORV
      // (cheaper than rol -- no negation needed, hence plain INSN_COST).
11758 instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11759 %{
11760   effect(DEF dst, USE src, USE shift);
11761 
11762   format %{ "ror    $dst, $src, $shift" %}
11763   ins_cost(INSN_COST);
11764   ins_encode %{
11765     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11766             as_Register($shift$$reg));
11767     %}
11768   ins_pipe(ialu_reg_reg_vshift);
11769 %}
11770 
11771 // ror expander
11772 
      // rorI_rReg: 32-bit variant, maps directly to RORVW.
11773 instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11774 %{
11775   effect(DEF dst, USE src, USE shift);
11776 
11777   format %{ "ror    $dst, $src, $shift" %}
11778   ins_cost(INSN_COST);
11779   ins_encode %{
11780     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11781             as_Register($shift$$reg));
11782     %}
11783   ins_pipe(ialu_reg_reg_vshift);
11784 %}
11785 
      // Canonical rotate-right shape (x >>> s) | (x << (64 - s)) -> ror expander.
11786 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11787 %{
11788   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
11789 
11790   expand %{
11791     rorL_rReg(dst, src, shift, cr);
11792   %}
11793 %}
11794 
      // Same shape with (0 - s); equivalent because shifts count modulo 64.
11795 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11796 %{
11797   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
11798 
11799   expand %{
11800     rorL_rReg(dst, src, shift, cr);
11801   %}
11802 %}
11803 
      // 32-bit rotate-right shape (x >>> s) | (x << (32 - s)).
11804 instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11805 %{
11806   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
11807 
11808   expand %{
11809     rorI_rReg(dst, src, shift, cr);
11810   %}
11811 %}
11812 
      // 32-bit rotate-right shape with (0 - s) as the complementary count.
11813 instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11814 %{
11815   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
11816 
11817   expand %{
11818     rorI_rReg(dst, src, shift, cr);
11819   %}
11820 %}
11821 
11822 // Add/subtract (extended)
11823 
      // These rules fold an explicit widening/narrowing of one operand into the
      // extended-register form of ADD/SUB.  A (x << N) >> N pair is a
      // sign-extend (arithmetic right shift) or zero-extend (logical right
      // shift) of the low (width - N) bits, which ADD/SUB can perform for free
      // via the sxtb/sxth/sxtw/uxtb extend modes.
      //
      // AddExtI: long + sign-extended int (ConvI2L folded into ADD ..., sxtw).
11824 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11825 %{
11826   match(Set dst (AddL src1 (ConvI2L src2)));
11827   ins_cost(INSN_COST);
11828   format %{ "add  $dst, $src1, $src2, sxtw" %}
11829 
11830    ins_encode %{
11831      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11832             as_Register($src2$$reg), ext::sxtw);
11833    %}
11834   ins_pipe(ialu_reg_reg);
11835 %};
11836 
      // SubExtI: long - sign-extended int.
11837 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11838 %{
11839   match(Set dst (SubL src1 (ConvI2L src2)));
11840   ins_cost(INSN_COST);
11841   format %{ "sub  $dst, $src1, $src2, sxtw" %}
11842 
11843    ins_encode %{
11844      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
11845             as_Register($src2$$reg), ext::sxtw);
11846    %}
11847   ins_pipe(ialu_reg_reg);
11848 %};
11849 
11850 
      // (src2 << 16) >> 16 (arithmetic) sign-extends the low 16 bits -> sxth.
11851 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
11852 %{
11853   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11854   ins_cost(INSN_COST);
11855   format %{ "add  $dst, $src1, $src2, sxth" %}
11856 
11857    ins_encode %{
11858      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11859             as_Register($src2$$reg), ext::sxth);
11860    %}
11861   ins_pipe(ialu_reg_reg);
11862 %}
11863 
      // (src2 << 24) >> 24 (arithmetic) sign-extends the low 8 bits -> sxtb.
11864 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11865 %{
11866   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11867   ins_cost(INSN_COST);
11868   format %{ "add  $dst, $src1, $src2, sxtb" %}
11869 
11870    ins_encode %{
11871      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11872             as_Register($src2$$reg), ext::sxtb);
11873    %}
11874   ins_pipe(ialu_reg_reg);
11875 %}
11876 
      // (src2 << 24) >>> 24 (logical) zero-extends the low 8 bits -> uxtb.
11877 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11878 %{
11879   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
11880   ins_cost(INSN_COST);
11881   format %{ "add  $dst, $src1, $src2, uxtb" %}
11882 
11883    ins_encode %{
11884      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11885             as_Register($src2$$reg), ext::uxtb);
11886    %}
11887   ins_pipe(ialu_reg_reg);
11888 %}
11889 
      // Long forms: shift pair of 48 keeps 16 bits -> sxth.
11890 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
11891 %{
11892   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11893   ins_cost(INSN_COST);
11894   format %{ "add  $dst, $src1, $src2, sxth" %}
11895 
11896    ins_encode %{
11897      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11898             as_Register($src2$$reg), ext::sxth);
11899    %}
11900   ins_pipe(ialu_reg_reg);
11901 %}
11902 
      // Shift pair of 32 keeps 32 bits -> sxtw.
11903 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
11904 %{
11905   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11906   ins_cost(INSN_COST);
11907   format %{ "add  $dst, $src1, $src2, sxtw" %}
11908 
11909    ins_encode %{
11910      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11911             as_Register($src2$$reg), ext::sxtw);
11912    %}
11913   ins_pipe(ialu_reg_reg);
11914 %}
11915 
      // Shift pair of 56 keeps 8 bits -> sxtb.
11916 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11917 %{
11918   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11919   ins_cost(INSN_COST);
11920   format %{ "add  $dst, $src1, $src2, sxtb" %}
11921 
11922    ins_encode %{
11923      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11924             as_Register($src2$$reg), ext::sxtb);
11925    %}
11926   ins_pipe(ialu_reg_reg);
11927 %}
11928 
      // Logical shift pair of 56 zero-extends the low 8 bits -> uxtb.
11929 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11930 %{
11931   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
11932   ins_cost(INSN_COST);
11933   format %{ "add  $dst, $src1, $src2, uxtb" %}
11934 
11935    ins_encode %{
11936      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11937             as_Register($src2$$reg), ext::uxtb);
11938    %}
11939   ins_pipe(ialu_reg_reg);
11940 %}
11941 
11942 
      // An AND with a contiguous low-bit mask (0xff / 0xffff / 0xffffffff) is a
      // zero-extend; fold it into the uxtb/uxth/uxtw extended-register form of
      // add/sub.  Int forms emit addw/subw, long forms emit add/sub.
11943 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
11944 %{
11945   match(Set dst (AddI src1 (AndI src2 mask)));
11946   ins_cost(INSN_COST);
11947   format %{ "addw  $dst, $src1, $src2, uxtb" %}
11948 
11949    ins_encode %{
11950      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11951             as_Register($src2$$reg), ext::uxtb);
11952    %}
11953   ins_pipe(ialu_reg_reg);
11954 %}
11955 
11956 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
11957 %{
11958   match(Set dst (AddI src1 (AndI src2 mask)));
11959   ins_cost(INSN_COST);
11960   format %{ "addw  $dst, $src1, $src2, uxth" %}
11961 
11962    ins_encode %{
11963      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
11964             as_Register($src2$$reg), ext::uxth);
11965    %}
11966   ins_pipe(ialu_reg_reg);
11967 %}
11968 
11969 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
11970 %{
11971   match(Set dst (AddL src1 (AndL src2 mask)));
11972   ins_cost(INSN_COST);
11973   format %{ "add  $dst, $src1, $src2, uxtb" %}
11974 
11975    ins_encode %{
11976      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11977             as_Register($src2$$reg), ext::uxtb);
11978    %}
11979   ins_pipe(ialu_reg_reg);
11980 %}
11981 
11982 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
11983 %{
11984   match(Set dst (AddL src1 (AndL src2 mask)));
11985   ins_cost(INSN_COST);
11986   format %{ "add  $dst, $src1, $src2, uxth" %}
11987 
11988    ins_encode %{
11989      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11990             as_Register($src2$$reg), ext::uxth);
11991    %}
11992   ins_pipe(ialu_reg_reg);
11993 %}
11994 
11995 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
11996 %{
11997   match(Set dst (AddL src1 (AndL src2 mask)));
11998   ins_cost(INSN_COST);
11999   format %{ "add  $dst, $src1, $src2, uxtw" %}
12000 
12001    ins_encode %{
12002      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12003             as_Register($src2$$reg), ext::uxtw);
12004    %}
12005   ins_pipe(ialu_reg_reg);
12006 %}
12007 
      // Subtract counterparts of the zero-extend-by-mask rules above.
12008 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
12009 %{
12010   match(Set dst (SubI src1 (AndI src2 mask)));
12011   ins_cost(INSN_COST);
12012   format %{ "subw  $dst, $src1, $src2, uxtb" %}
12013 
12014    ins_encode %{
12015      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12016             as_Register($src2$$reg), ext::uxtb);
12017    %}
12018   ins_pipe(ialu_reg_reg);
12019 %}
12020 
12021 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12022 %{
12023   match(Set dst (SubI src1 (AndI src2 mask)));
12024   ins_cost(INSN_COST);
12025   format %{ "subw  $dst, $src1, $src2, uxth" %}
12026 
12027    ins_encode %{
12028      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12029             as_Register($src2$$reg), ext::uxth);
12030    %}
12031   ins_pipe(ialu_reg_reg);
12032 %}
12033 
12034 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12035 %{
12036   match(Set dst (SubL src1 (AndL src2 mask)));
12037   ins_cost(INSN_COST);
12038   format %{ "sub  $dst, $src1, $src2, uxtb" %}
12039 
12040    ins_encode %{
12041      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12042             as_Register($src2$$reg), ext::uxtb);
12043    %}
12044   ins_pipe(ialu_reg_reg);
12045 %}
12046 
12047 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12048 %{
12049   match(Set dst (SubL src1 (AndL src2 mask)));
12050   ins_cost(INSN_COST);
12051   format %{ "sub  $dst, $src1, $src2, uxth" %}
12052 
12053    ins_encode %{
12054      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12055             as_Register($src2$$reg), ext::uxth);
12056    %}
12057   ins_pipe(ialu_reg_reg);
12058 %}
12059 
12060 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12061 %{
12062   match(Set dst (SubL src1 (AndL src2 mask)));
12063   ins_cost(INSN_COST);
12064   format %{ "sub  $dst, $src1, $src2, uxtw" %}
12065 
12066    ins_encode %{
12067      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12068             as_Register($src2$$reg), ext::uxtw);
12069    %}
12070   ins_pipe(ialu_reg_reg);
12071 %}
12072 
12073 
      // ((src2 << N) >> N) << lshift2 sign-extends src2's low bits and then
      // shifts the result.  ADD/SUB's extended-register form supports an
      // extend plus a small left shift (immIExt limits the amount), so the
      // whole pattern folds into one instruction.
12074 instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12075 %{
12076   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12077   ins_cost(1.9 * INSN_COST);
12078   format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}
12079 
12080    ins_encode %{
12081      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12082             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12083    %}
12084   ins_pipe(ialu_reg_reg_shift);
12085 %}
12086 
12087 instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12088 %{
12089   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12090   ins_cost(1.9 * INSN_COST);
12091   format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}
12092 
12093    ins_encode %{
12094      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12095             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12096    %}
12097   ins_pipe(ialu_reg_reg_shift);
12098 %}
12099 
12100 instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12101 %{
12102   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12103   ins_cost(1.9 * INSN_COST);
12104   format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}
12105 
12106    ins_encode %{
12107      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12108             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12109    %}
12110   ins_pipe(ialu_reg_reg_shift);
12111 %}
12112 
      // Subtract counterparts of the sign-extend-and-shift rules.
12113 instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
12114 %{
12115   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12116   ins_cost(1.9 * INSN_COST);
12117   format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}
12118 
12119    ins_encode %{
12120      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12121             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12122    %}
12123   ins_pipe(ialu_reg_reg_shift);
12124 %}
12125 
12126 instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
12127 %{
12128   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12129   ins_cost(1.9 * INSN_COST);
12130   format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}
12131 
12132    ins_encode %{
12133      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12134             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12135    %}
12136   ins_pipe(ialu_reg_reg_shift);
12137 %}
12138 
12139 instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
12140 %{
12141   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
12142   ins_cost(1.9 * INSN_COST);
12143   format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}
12144 
12145    ins_encode %{
12146      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12147             as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
12148    %}
12149   ins_pipe(ialu_reg_reg_shift);
12150 %}
12151 
      // 32-bit (addw/subw) counterparts with 24/16-bit shift pairs.
12152 instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12153 %{
12154   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12155   ins_cost(1.9 * INSN_COST);
12156   format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}
12157 
12158    ins_encode %{
12159      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12160             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12161    %}
12162   ins_pipe(ialu_reg_reg_shift);
12163 %}
12164 
12165 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12166 %{
12167   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12168   ins_cost(1.9 * INSN_COST);
12169   format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
12170 
12171    ins_encode %{
12172      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12173             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12174    %}
12175   ins_pipe(ialu_reg_reg_shift);
12176 %}
12177 
12178 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12179 %{
12180   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12181   ins_cost(1.9 * INSN_COST);
12182   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
12183 
12184    ins_encode %{
12185      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12186             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12187    %}
12188   ins_pipe(ialu_reg_reg_shift);
12189 %}
12190 
12191 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12192 %{
12193   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12194   ins_cost(1.9 * INSN_COST);
12195   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
12196 
12197    ins_encode %{
12198      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12199             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12200    %}
12201   ins_pipe(ialu_reg_reg_shift);
12202 %}
12203 
12204 
      // Long add/sub of a left-shifted ConvI2L operand: fold the sign-extend
      // and the shift into ADD/SUB ..., sxtw #lshift (shift bounded by immIExt).
12205 instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12206 %{
12207   match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
12208   ins_cost(1.9 * INSN_COST);
12209   format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}
12210 
12211    ins_encode %{
12212      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12213             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12214    %}
12215   ins_pipe(ialu_reg_reg_shift);
12216 %};
12217 
12218 instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
12219 %{
12220   match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
12221   ins_cost(1.9 * INSN_COST);
12222   format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}
12223 
12224    ins_encode %{
12225      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12226             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
12227    %}
12228   ins_pipe(ialu_reg_reg_shift);
12229 %};
12230 
12231 
      // A zero-extend via AND-with-low-mask followed by a left shift folds into
      // ADD/SUB's extended-register form: uxtb/uxth/uxtw #lshift.
12232 instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
12233 %{
12234   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12235   ins_cost(1.9 * INSN_COST);
12236   format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}
12237 
12238    ins_encode %{
12239      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12240             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12241    %}
12242   ins_pipe(ialu_reg_reg_shift);
12243 %}
12244 
12245 instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
12246 %{
12247   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12248   ins_cost(1.9 * INSN_COST);
12249   format %{ "add  $dst, $src1, $src2, uxth #lshift" %}
12250 
12251    ins_encode %{
12252      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12253             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12254    %}
12255   ins_pipe(ialu_reg_reg_shift);
12256 %}
12257 
12258 instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
12259 %{
12260   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
12261   ins_cost(1.9 * INSN_COST);
12262   format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}
12263 
12264    ins_encode %{
12265      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12266             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
12267    %}
12268   ins_pipe(ialu_reg_reg_shift);
12269 %}
12270 
      // Subtract counterparts of the zero-extend-and-shift rules.
12271 instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
12272 %{
12273   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12274   ins_cost(1.9 * INSN_COST);
12275   format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}
12276 
12277    ins_encode %{
12278      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12279             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12280    %}
12281   ins_pipe(ialu_reg_reg_shift);
12282 %}
12283 
12284 instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
12285 %{
12286   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12287   ins_cost(1.9 * INSN_COST);
12288   format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}
12289 
12290    ins_encode %{
12291      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12292             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12293    %}
12294   ins_pipe(ialu_reg_reg_shift);
12295 %}
12296 
12297 instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
12298 %{
12299   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
12300   ins_cost(1.9 * INSN_COST);
12301   format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}
12302 
12303    ins_encode %{
12304      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12305             as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
12306    %}
12307   ins_pipe(ialu_reg_reg_shift);
12308 %}
12309 
      // 32-bit (addw/subw) counterparts of the rules above.
12310 instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12311 %{
12312   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12313   ins_cost(1.9 * INSN_COST);
12314   format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}
12315 
12316    ins_encode %{
12317      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12318             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12319    %}
12320   ins_pipe(ialu_reg_reg_shift);
12321 %}
12322 
12323 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12324 %{
12325   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
12326   ins_cost(1.9 * INSN_COST);
12327   format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
12328 
12329    ins_encode %{
12330      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12331             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12332    %}
12333   ins_pipe(ialu_reg_reg_shift);
12334 %}
12335 
12336 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
12337 %{
12338   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12339   ins_cost(1.9 * INSN_COST);
12340   format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
12341 
12342    ins_encode %{
12343      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12344             as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
12345    %}
12346   ins_pipe(ialu_reg_reg_shift);
12347 %}
12348 
12349 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
12350 %{
12351   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
12352   ins_cost(1.9 * INSN_COST);
12353   format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
12354 
12355    ins_encode %{
12356      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12357             as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
12358    %}
12359   ins_pipe(ialu_reg_reg_shift);
12360 %}
12361 // END This section of the file is automatically generated. Do not edit --------------
12362 
12363 // ============================================================================
12364 // Floating Point Arithmetic Instructions
12365 
      // Scalar FP add: single precision (FADDS).
12366 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12367   match(Set dst (AddF src1 src2));
12368 
12369   ins_cost(INSN_COST * 5);
12370   format %{ "fadds   $dst, $src1, $src2" %}
12371 
12372   ins_encode %{
12373     __ fadds(as_FloatRegister($dst$$reg),
12374              as_FloatRegister($src1$$reg),
12375              as_FloatRegister($src2$$reg));
12376   %}
12377 
12378   ins_pipe(fp_dop_reg_reg_s);
12379 %}
12380 
      // Scalar FP add: double precision (FADDD).
12381 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12382   match(Set dst (AddD src1 src2));
12383 
12384   ins_cost(INSN_COST * 5);
12385   format %{ "faddd   $dst, $src1, $src2" %}
12386 
12387   ins_encode %{
12388     __ faddd(as_FloatRegister($dst$$reg),
12389              as_FloatRegister($src1$$reg),
12390              as_FloatRegister($src2$$reg));
12391   %}
12392 
12393   ins_pipe(fp_dop_reg_reg_d);
12394 %}
12395 
      // Scalar FP subtract: single precision (FSUBS).
12396 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12397   match(Set dst (SubF src1 src2));
12398 
12399   ins_cost(INSN_COST * 5);
12400   format %{ "fsubs   $dst, $src1, $src2" %}
12401 
12402   ins_encode %{
12403     __ fsubs(as_FloatRegister($dst$$reg),
12404              as_FloatRegister($src1$$reg),
12405              as_FloatRegister($src2$$reg));
12406   %}
12407 
12408   ins_pipe(fp_dop_reg_reg_s);
12409 %}
12410 
      // Scalar FP subtract: double precision (FSUBD).
12411 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12412   match(Set dst (SubD src1 src2));
12413 
12414   ins_cost(INSN_COST * 5);
12415   format %{ "fsubd   $dst, $src1, $src2" %}
12416 
12417   ins_encode %{
12418     __ fsubd(as_FloatRegister($dst$$reg),
12419              as_FloatRegister($src1$$reg),
12420              as_FloatRegister($src2$$reg));
12421   %}
12422 
12423   ins_pipe(fp_dop_reg_reg_d);
12424 %}
12425 
      // Scalar FP multiply: single precision (FMULS); slightly costlier than
      // add/sub (INSN_COST * 6 vs * 5).
12426 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12427   match(Set dst (MulF src1 src2));
12428 
12429   ins_cost(INSN_COST * 6);
12430   format %{ "fmuls   $dst, $src1, $src2" %}
12431 
12432   ins_encode %{
12433     __ fmuls(as_FloatRegister($dst$$reg),
12434              as_FloatRegister($src1$$reg),
12435              as_FloatRegister($src2$$reg));
12436   %}
12437 
12438   ins_pipe(fp_dop_reg_reg_s);
12439 %}
12440 
      // Scalar FP multiply: double precision (FMULD).
12441 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12442   match(Set dst (MulD src1 src2));
12443 
12444   ins_cost(INSN_COST * 6);
12445   format %{ "fmuld   $dst, $src1, $src2" %}
12446 
12447   ins_encode %{
12448     __ fmuld(as_FloatRegister($dst$$reg),
12449              as_FloatRegister($src1$$reg),
12450              as_FloatRegister($src2$$reg));
12451   %}
12452 
12453   ins_pipe(fp_dop_reg_reg_d);
12454 %}
12455 
      // Fused multiply-add family.  All rules are gated on UseFMA because a
      // fused operation rounds once, which Java only permits for Math.fma.
12456 // src1 * src2 + src3
12457 instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12458   predicate(UseFMA);
12459   match(Set dst (FmaF src3 (Binary src1 src2)));
12460 
12461   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12462 
12463   ins_encode %{
12464     __ fmadds(as_FloatRegister($dst$$reg),
12465              as_FloatRegister($src1$$reg),
12466              as_FloatRegister($src2$$reg),
12467              as_FloatRegister($src3$$reg));
12468   %}
12469 
12470   ins_pipe(pipe_class_default);
12471 %}
12472 
12473 // src1 * src2 + src3
12474 instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12475   predicate(UseFMA);
12476   match(Set dst (FmaD src3 (Binary src1 src2)));
12477 
12478   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12479 
12480   ins_encode %{
12481     __ fmaddd(as_FloatRegister($dst$$reg),
12482              as_FloatRegister($src1$$reg),
12483              as_FloatRegister($src2$$reg),
12484              as_FloatRegister($src3$$reg));
12485   %}
12486 
12487   ins_pipe(pipe_class_default);
12488 %}
12489 
      // FMSUB computes src3 - src1 * src2, so it covers the negation appearing
      // on either multiplicand (both match rules below).
12490 // -src1 * src2 + src3
12491 instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12492   predicate(UseFMA);
12493   match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
12494   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
12495 
12496   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12497 
12498   ins_encode %{
12499     __ fmsubs(as_FloatRegister($dst$$reg),
12500               as_FloatRegister($src1$$reg),
12501               as_FloatRegister($src2$$reg),
12502               as_FloatRegister($src3$$reg));
12503   %}
12504 
12505   ins_pipe(pipe_class_default);
12506 %}
12507 
12508 // -src1 * src2 + src3
12509 instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12510   predicate(UseFMA);
12511   match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
12512   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
12513 
12514   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12515 
12516   ins_encode %{
12517     __ fmsubd(as_FloatRegister($dst$$reg),
12518               as_FloatRegister($src1$$reg),
12519               as_FloatRegister($src2$$reg),
12520               as_FloatRegister($src3$$reg));
12521   %}
12522 
12523   ins_pipe(pipe_class_default);
12524 %}
12525 
      // FNMADD computes -(src1 * src2) - src3; matched when both the accumulator
      // and one multiplicand are negated.
12526 // -src1 * src2 - src3
12527 instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12528   predicate(UseFMA);
12529   match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
12530   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
12531 
12532   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12533 
12534   ins_encode %{
12535     __ fnmadds(as_FloatRegister($dst$$reg),
12536                as_FloatRegister($src1$$reg),
12537                as_FloatRegister($src2$$reg),
12538                as_FloatRegister($src3$$reg));
12539   %}
12540 
12541   ins_pipe(pipe_class_default);
12542 %}
12543 
12544 // -src1 * src2 - src3
12545 instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12546   predicate(UseFMA);
12547   match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
12548   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
12549 
12550   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12551 
12552   ins_encode %{
12553     __ fnmaddd(as_FloatRegister($dst$$reg),
12554                as_FloatRegister($src1$$reg),
12555                as_FloatRegister($src2$$reg),
12556                as_FloatRegister($src3$$reg));
12557   %}
12558 
12559   ins_pipe(pipe_class_default);
12560 %}
12561 
// src1 * src2 - src3
// Matches FmaF with a negated addend; fnmsubs computes (src1 * src2) - src3.
// Fix: dropped the `immF0 zero` operand — it was not referenced by the match
// rule, the format, or the encoding (leftover from an earlier match pattern).
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12578 
// src1 * src2 - src3
// Matches FmaD with a negated addend; the emitted instruction computes
// (src1 * src2) - src3.
// Fix: dropped the `immD0 zero` operand — it was not referenced by the match
// rule, the format, or the encoding (leftover from an earlier match pattern).
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12596 
12597 
// Single-precision divide: dst = src1 / src2.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  // Divide is much slower than the other FP data ops.
  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12627 
// Single-precision negate: dst = -src.
// Fix: format mnemonic corrected from "fneg" to "fnegs" — the encoding emits
// fnegs, and the sibling negD rule prints "fnegd".
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12641 
// Double-precision negate: dst = -src.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision absolute value: dst = |src|.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value: dst = |src|.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12681 
// Double-precision square root: dst = sqrt(src).
// Fix: scheduled on fp_div_d instead of fp_div_s — this is a double-precision
// operation; the single/double pipe classes were swapped with sqrtF_reg.
// (Pipe class only affects scheduling heuristics, not generated code.)
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12694 
// Single-precision square root. Matches the Ideal graph shape produced for
// (float) Math.sqrt((double) f): widen, SqrtD, then narrow — folded into a
// single fsqrts.
// Fix: scheduled on fp_div_s instead of fp_div_d — this is a single-precision
// operation; the single/double pipe classes were swapped with sqrtD_reg.
// (Pipe class only affects scheduling heuristics, not generated code.)
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12707 
12708 // ============================================================================
12709 // Logical Instructions
12710 
12711 // Integer Logical Instructions
12712 
12713 // And Instructions
12714 
12715 
// Int bitwise AND, register-register: dst = src1 & src2.
// NOTE(review): the rFlagsReg cr operand is not referenced by the match rule
// and there is no effect(KILL cr) — presumably a leftover from a flag-setting
// (ands) variant; confirm before relying on flags being preserved here.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12730 
// Int bitwise AND with a logical immediate: dst = src1 & src2.
// Fix: format mnemonic corrected from "andsw" to "andw" — the encoding emits
// a plain (non-flag-setting) andw, matching the register-register rule.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12745 
12746 // Or Instructions
12747 
// Int bitwise OR, register-register: dst = src1 | src2.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise OR with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Int bitwise XOR, register-register: dst = src1 ^ src2.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise XOR with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12809 
12810 // Long Logical Instructions
12811 // TODO
12812 
// Long bitwise AND, register-register: dst = src1 & src2.
// Fix: format comment corrected from "# int" to "# long" — this is an AndL
// rule and emits the 64-bit andr.
// NOTE(review): the rFlagsReg cr operand is not referenced by the match rule
// and there is no effect(KILL cr) — presumably a leftover; confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12827 
// Long bitwise AND with a logical immediate.
// Fix: format comment corrected from "# int" to "# long" (AndL rule).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12842 
12843 // Or Instructions
12844 
// Long bitwise OR, register-register: dst = src1 | src2.
// Fix: format comment corrected from "# int" to "# long" (OrL rule).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12859 
// Long bitwise OR with a logical immediate.
// Fix: format comment corrected from "# int" to "# long" (OrL rule).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12874 
12875 // Xor Instructions
12876 
// Long bitwise XOR, register-register: dst = src1 ^ src2.
// Fix: format comment corrected from "# int" to "# long" (XorL rule).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12891 
// Long bitwise XOR with a logical immediate.
// Fixes: format comment corrected from "# int" to "# long" (XorL rule), and
// format/ins_cost ordering normalized to match the sibling logical rules.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12906 
// Sign-extend int to long: SBFM #0,#31 is the sxtw encoding.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: (long)src & 0xFFFFFFFF folded into a single ubfm.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Int to boolean: dst = (src != 0) ? 1 : 0, via compare + cset; kills flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer to boolean: dst = (src != NULL) ? 1 : 0; kills flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Narrow double to float.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Widen float to double.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float to int: signed convert with round-toward-zero (fcvtzs, 32-bit).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to long: signed convert with round-toward-zero (fcvtzs, 64-bit).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Int to float: signed convert.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long to float: signed convert.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double to int: signed convert with round-toward-zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double to long: signed convert with round-toward-zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int to double: signed convert.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long to double: signed convert.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13111 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret raw bits between integer and FP register files (or a
// stack slot) without any value conversion.

// Reinterpret a float stack slot as an int register (32-bit load).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret an int stack slot as a float register (32-bit FP load).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a double stack slot as a long register (64-bit load).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a long stack slot as a double register (64-bit FP load).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a float register into an int stack slot (32-bit FP store).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register into a float stack slot (32-bit store).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13221 
// Store a double register into a long stack slot (64-bit FP store).
// Fix: format operand order corrected from "strd $dst, $src" to
// "strd $src, $dst" — the encoding stores $src to the $dst stack slot, and
// every sibling reg->stack rule prints "$src, $dst".
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13239 
// Store a long register into a double stack slot (64-bit store).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Bit-move float register to int register (fmov, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-move int register to float register (fmov, no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bit-move double register to long register (fmov, no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-move long register to double register (fmov, no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13329 
13330 // ============================================================================
13331 // clearing of an array
13332 
// Zero `cnt` words starting at `base`; variable count. Both fixed registers
// (r10 base, r11 cnt) are clobbered by the zero_words stub call.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Zero a compile-time-constant number of words; only used when the count is
// below BlockZeroingLowLimit (in words), where an inline sequence beats the
// block-zeroing path.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13364 
13365 // ============================================================================
13366 // Overflow Math Instructions
13367 
// ---- Overflow checks for add/sub/neg -------------------------------------
// Each rule sets the flags so a following branch on VS/VC detects overflow;
// cmn (compare-negative) is add-based, cmp is subtract-based.

// Int add overflow check: cmnw performs op1 + op2 and sets flags.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add overflow check with an add/sub immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add overflow check.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check with an add/sub immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int subtract overflow check: cmpw performs op1 - op2 and sets flags.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check with an add/sub immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check with an add/sub immediate.
// subs with zr as destination is cmp; flags are set, result discarded.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int negate overflow check: 0 - op1 via cmpw against zr.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check: 0 - op1 via cmp against zr.
// NOTE(review): zero is declared immI0 (int) in this long rule — looks like
// it is only used to anchor the match; confirm intentional.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13497 
// Int multiply overflow check (flag-producing form): do the multiply in
// 64 bits, compare against the sign-extended 32-bit result, then synthesize
// the V flag via the 0x80000000 - 1 trick so a generic VS/VC test works.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Int multiply overflow check fused with its branch: avoids the V-flag
// synthesis by branching directly on the EQ/NE result of the sxtw compare.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check (flag-producing form): 128-bit product via
// mul/smulh; overflow iff the high half is not the sign extension of the low.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Long multiply overflow check fused with its branch.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13587 
13588 // ============================================================================
13589 // Compare Instructions
13590 
// Signed int compare, register-register: CMPW sets the condition flags.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate encodable in the add/sub
// immediate field of CMPW.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; doubled cost because
// the encoding may need an extra instruction to materialize the constant.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13646 
13647 // Unsigned compare Instructions; really, same as signed compare
13648 // except it should only be used to feed an If or a CMovI which takes a
13649 // cmpOpU.
13650 
// Unsigned int compare, register-register.  Same CMPW encoding as the
// signed variants; only the consuming cmpOpU differs (DEF's rFlagsRegU).
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may cost a
// constant-materializing instruction, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13706 
// Signed long compare, register-register (64-bit CMP).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.  The format prints "tst" but the
// encoding is presumably a compare against #0 (add/sub immediate form) —
// the mnemonic in the format string is a legacy label only.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (doubled cost for
// possible constant materialization).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13762 
// Unsigned long compare, register-register; same CMP encoding as the
// signed variants, unsigned semantics come from the consuming cmpOpU.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero ("tst" in the format is a legacy
// label; the encoding compares against #0).
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13818 
// Pointer compare, register-register.  Pointers compare unsigned, hence
// rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13846 
// Pointer null-test: compare a pointer register against 0.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null-test: compare a narrow oop register against 0.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13874 
13875 // FP comparisons
13876 //
13877 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13878 // using normal cmpOp. See declaration of rFlagsReg for details.
13879 
// Float compare, register-register.  FCMPS writes the ordinary condition
// flags, which downstream cmpOp users then test (see rFlagsReg notes above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13893 
// Float compare against constant 0.0, setting the condition flags.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++
    // and newer compilers reject it; the value is identical.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13907 // FROM HERE
13908 
// Double compare, register-register.  FCMPD writes the ordinary condition
// flags for downstream cmpOp users.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13922 
// Double compare against constant 0.0, setting the condition flags.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++
    // and newer compilers reject it; the value is identical.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13936 
// Three-way float compare: $dst = -1 (less or unordered), 0 (equal),
// or +1 (greater).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed: the csinvw line was missing its closing parenthesis.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): 'done' is never branched to; binding it emits no code.
    // Kept to avoid altering emitted structure — confirm it can be dropped.
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
13964 
// Three-way double compare: $dst = -1 (less or unordered), 0 (equal),
// or +1 (greater).
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed: the csinvw line was missing its closing parenthesis.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): 'done' is never branched to — confirm it can be dropped.
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
13991 
// Three-way float compare against 0.0: $dst = -1 (less or unordered),
// 0 (equal), or +1 (greater).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed: the csinvw line was missing its closing parenthesis.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): 'done' is never branched to — confirm it can be dropped.
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
14018 
// Three-way double compare against 0.0: $dst = -1 (less or unordered),
// 0 (equal), or +1 (greater).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Format fixed: the csinvw line was missing its closing parenthesis.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Plain 0.0 literal: the Java-style 'D' suffix is not standard C++.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): 'done' is never branched to — confirm it can be dropped.
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14044 
// CmpLTMask: $dst = (p < q) ? -1 : 0, computed as negate-of-cset.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);       // dst = (p < q) ? 1 : 0
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg)); // dst = -dst
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: a single arithmetic right shift replicates the
// sign bit, yielding -1 for negative inputs and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14081 
14082 // ============================================================================
14083 // Max and Min
14084 
// Signed int minimum: compare then conditional-select the smaller operand.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8); // exactly two 4-byte instructions: cmpw + cselw

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 < src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: compare then conditional-select the larger operand.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8); // exactly two 4-byte instructions: cmpw + cselw

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 > src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14135 
14136 // ============================================================================
14137 // Branch Instructions
14138 
14139 // Direct Branch.
// Direct (unconditional) branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14153 
14154 // Conditional Near Branch
// Conditional near branch on signed condition codes.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional near branch on unsigned condition codes.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14195 
14196 // Make use of CBZ and CBNZ.  These instructions, as well as being
14197 // shorter than (cmp; branch), have the additional benefit of not
14198 // killing the flags.
14199 
// Compare-int-with-zero fused into a CBZW/CBNZW branch (no flags killed).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);   // branch if register is zero
    else
      __ cbnzw($op1$$Register, *L);  // branch if register is nonzero
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-long-with-zero fused into a CBZ/CBNZ branch.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check fused into a CBZ/CBNZ branch.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop null-check fused into a CBZW/CBNZW branch.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-check of a decoded narrow oop: a narrow oop decodes to null iff the
// 32-bit encoded value is zero, so test the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare with zero fused into CBZW/CBNZW.  For an unsigned
// value, EQ and LS (<= 0) both hold exactly when the register is zero;
// their negations hold when it is nonzero.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare with zero fused into CBZ/CBNZ (same EQ/LS logic).
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14318 
14319 // Test bit and Branch
14320 
14321 // Patterns for short (< 32KiB) variants
// Sign test of a long fused into a TBNZ/TBZ on bit 63 (LT means the sign
// bit is set -> TBNZ/NE; GE means it is clear -> TBZ/EQ).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int fused into a TBNZ/TBZ on bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long ((op1 & 2^k) ==/!= 0) fused into TBZ/TBNZ.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  // Only a power-of-two mask selects exactly one bit.
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int fused into TBZ/TBNZ.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  // Only a power-of-two mask selects exactly one bit.
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14387 
14388 // And far variants
// Far variant of the long sign-test branch: tbr's far flag emits an
// inverted test around an unconditional branch when the target is out of
// TBZ/TBNZ range, so no ins_short_branch marker here.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the int sign-test branch (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the long single-bit-test branch.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the int single-bit-test branch.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14450 
14451 // Test bits
14452 
// Mask test of a long against an immediate: (op1 & op2) compared to zero
// via TST, valid only when the mask is a legal 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Mask test of an int against an immediate, using the 32-bit logical
// immediate encoding.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Mask test of a long against a register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Mask test of an int against a register mask.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14500 
14501 
14502 // Conditional Far Branch
14503 // Conditional Far Branch Unsigned
14504 // TODO: fixme
14505 
14506 // counted loop end branch near
// Counted loop back-branch, signed condition codes.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted loop back-branch, unsigned condition codes.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14539 
14540 // counted loop end branch far
14541 // counted loop end branch far unsigned
14542 // TODO: fixme
14543 
14544 // ============================================================================
14545 // inlined locking and unlocking
14546 
// Inline monitor enter fast path; flags result is consumed by the matched
// If to decide whether to call the slow-path runtime stub.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inline monitor exit fast path; same flag-result contract as cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14574 
14575 
14576 // ============================================================================
14577 // Safepoint Instructions
14578 
14579 // TODO
14580 // provide a near and far version of this code
14581 
// Safepoint poll: a load from the polling page; the VM arms the page to
// fault here when a safepoint is requested.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14594 
14595 
14596 // ============================================================================
14597 // Procedure Call/Return Instructions
14598 
14599 // Call Java Static Instruction
14600 
14601 instruct CallStaticJavaDirect(method meth)
14602 %{
14603   match(CallStaticJava);
14604 
14605   effect(USE meth);
14606 
14607   ins_cost(CALL_COST);
14608 
14609   format %{ "call,static $meth \t// ==> " %}
14610 
14611   ins_encode( aarch64_enc_java_static_call(meth),
14612               aarch64_enc_call_epilog );
14613 
14614   ins_pipe(pipe_class_call);
14615 %}
14616 
14617 // TO HERE
14618 
14619 // Call Java Dynamic Instruction
14620 instruct CallDynamicJavaDirect(method meth)
14621 %{
14622   match(CallDynamicJava);
14623 
14624   effect(USE meth);
14625 
14626   ins_cost(CALL_COST);
14627 
14628   format %{ "CALL,dynamic $meth \t// ==> " %}
14629 
14630   ins_encode( aarch64_enc_java_dynamic_call(meth),
14631                aarch64_enc_call_epilog );
14632 
14633   ins_pipe(pipe_class_call);
14634 %}
14635 
14636 // Call Runtime Instruction
14637 
14638 instruct CallRuntimeDirect(method meth)
14639 %{
14640   match(CallRuntime);
14641 
14642   effect(USE meth);
14643 
14644   ins_cost(CALL_COST);
14645 
14646   format %{ "CALL, runtime $meth" %}
14647 
14648   ins_encode( aarch64_enc_java_to_runtime(meth) );
14649 
14650   ins_pipe(pipe_class_call);
14651 %}
14652 
14653 // Call Runtime Instruction
14654 
// Direct call to a runtime leaf entry point (CallLeaf ideal node);
// same encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14669 
// Call Runtime Leaf (no floating-point state) Instruction
14671 
// Direct call to a runtime leaf entry point that is matched by the
// CallLeafNoFP ideal node; the encoding is identical to CallLeafDirect.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14686 
14687 // Tail Call; Jump from runtime stub to Java code.
14688 // Also known as an 'interprocedural jump'.
14689 // Target of jump will eventually return to caller.
14690 // TailJump below removes the return address.
// Indirect tail call: branch to $jump_target with the method oop held
// in the dedicated inline-cache register (method_oop is a fixed
// inline_cache_RegP operand, so no move is needed).
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14703 
// TailJump: branch to $jump_target with the exception oop pinned to r0
// (iRegP_R0).  Per the comment above, this variant removes the return
// address before jumping.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14716 
14717 // Create exception oop: created by stack-crawling runtime code.
14718 // Created exception is now available to this handler, and is setup
14719 // just prior to jumping to this handler. No code emitted.
14720 // TODO check
14721 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// CreateEx: the exception oop is already in r0 when control reaches the
// handler (set up by stack-crawling runtime code), so this instruct is
// purely a register-allocation artifact and emits nothing (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14734 
14735 // Rethrow exception: The exception oop will come in the first
14736 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: the exception oop arrives in the first argument register;
// we JUMP (not call) to the shared rethrow stub.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14747 
14748 
14749 // Return Instruction
14750 // epilog node loads ret address into lr as part of frame pop
// Method return: `ret` branches to lr, which the epilog has already
// reloaded as part of the frame pop (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14761 
14762 // Die now.
// Halt node: emit a permanently-trapping instruction; executing it
// indicates a compiler bug.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
14777 
14778 // ============================================================================
14779 // Partial Subtype Check
14780 //
14781 // superklass array for an instance of the superklass.  Set a hidden
14782 // internal cache on a hit (cache is checked with exposed code in
14783 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14784 // encoding ALSO sets flags.
14785 
// Value-producing form of the partial subtype check: $result receives
// zero on a hit (opcode 0x1 below requests the zeroing), non-zero on a
// miss.  Registers are fixed by the shared encoding.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14800 
// Flags-only form: matches (CmpP (PartialSubtypeCheck ...) 0) so only
// the condition flags are consumed; the result register is clobbered
// but not zeroed on a hit (opcode 0x0 below).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  // NOTE(review): cost comment looks copied from partialSubtypeCheck
  // above (both versions cost 1100) — confirm intended relative cost.
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14815 
// String.compareTo intrinsics.  One variant per encoding combination of
// the two operands (U = UTF-16, L = Latin-1), selected by the
// StrCompNode's encoding() predicate.  All operands are pinned to fixed
// registers and are clobbered; the mixed-encoding variants (UL/LU)
// additionally need three vector temps, which the same-encoding
// variants pass as fnoreg.

instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 vs Latin-1 comparison.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed UTF-16 vs Latin-1 comparison; needs vector temps v0-v2.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed Latin-1 vs UTF-16 comparison; needs vector temps v0-v2.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14890 
// String.indexOf (substring search) intrinsics with a run-time needle
// length: the -1 passed as icnt2 selects the variable-length path in
// MacroAssembler::string_indexof.  One variant per encoding combination,
// chosen by the StrIndexOfNode's encoding() predicate.

instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1 haystack, Latin-1 needle.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding (UL) search.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14953 
// Constant-needle-length String.indexOf variants: the needle length is
// a small compile-time constant (immI_le_4 for UU/LL, exactly 1 for the
// UL variant) passed to string_indexof as icnt2; cnt2 is therefore not
// an operand and zr is passed in its place.  Fewer temps are needed
// than in the variable-length variants.

instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1/Latin-1, constant needle length <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed UL variant: only a needle length of exactly 1 (immI_1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15016 
// StrIndexOfChar intrinsic: find the first occurrence of char $ch in
// the string $str1 of length $cnt1; index (or miss indicator) goes to
// $result.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15034 
// String.equals intrinsics.  The trailing integer passed to
// string_equals is the element size: 1 for Latin-1, 2 for UTF-16.

instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 variant: 2-byte elements.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15066 
// Arrays.equals intrinsics.  The trailing integer passed to
// arrays_equals is the element size: 1 for byte[], 2 for char[].

instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}

// char[] variant: 2-byte elements.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15100 
// HasNegatives intrinsic: test whether any of the first $len bytes of
// $ary1 has its sign bit set; result goes to r0.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15111 
15112 // fast char[] to byte[] compression
// StrCompressedCopy: compress $len chars from char[] $src into byte[]
// $dst using vector temps v0-v3; $result reports the outcome.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15130 
15131 // fast byte[] to char[] inflation
// StrInflatedCopy: inflate $len bytes from byte[] $src into char[]
// $dst; produces no value (the Universe dummy result).
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15145 
15146 // encode char[] to byte[] in ISO_8859_1
// EncodeISOArray: encode $len chars from $src into ISO-8859-1 bytes at
// $dst using vector temps v0-v3; $result reports how many were encoded.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15164 
15165 // ============================================================================
15166 // This name is KNOWN by the ADLC and cannot be changed.
15167 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15168 // for this guy.
// ThreadLocal: the current thread pointer already lives in the
// dedicated thread register (thread_RegP), so this emits no code
// (size 0, cost 0) — it only names the register for the allocator.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15183 
15184 // ====================VECTOR INSTRUCTIONS=====================================
15185 
// Vector register load/store: one instruct per access size (4, 8 or
// 16 bytes), selected by the node's memory_size() predicate.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15251 
// Replicate (splat) a scalar into every lane of a vector register.
// vecD variants fill a 64-bit register, vecX variants a 128-bit one;
// several vecD variants also cover shorter vector lengths (e.g.
// replicate8B handles lengths 4 and 8).  The *_imm variants splat a
// compile-time immediate with movi, masking it to the lane width.

instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // mask to the low byte: each 8-bit lane gets the same value
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // mask to the low half-word: each 16-bit lane gets the same value
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// NOTE(review): this matches ReplicateI (not ReplicateL) and the format
// says 4I even though it is the 2L-zero pattern; a zero 2L vector is
// bit-identical to a zero 4I vector and the eor below clears all 128
// bits, so the result is the same — confirm the match rule is intended.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // xor the register with itself to produce all-zero lanes
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15464 
15465 // ====================REDUCTION ARITHMETIC====================================
15466 
// Integer add-reduction over a 2-lane (64-bit) vector:
// dst = src1 + src2[0] + src2[1].
// Both 32-bit lanes are extracted to GP registers with UMOV, then
// summed with scalar ADDW (integer addition is associative, so order
// is irrelevant here).
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15485 
// Integer add-reduction over a 4-lane (128-bit) vector:
// dst = src1 + src2[0] + src2[1] + src2[2] + src2[3].
// ADDV sums all four lanes into tmp lane 0, UMOV moves that to a GP
// register, and a final scalar ADDW folds in src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15503 
// Integer multiply-reduction over a 2-lane (64-bit) vector:
// dst = src1 * src2[0] * src2[1].
// Each lane is extracted to a GP register with UMOV and folded in
// with scalar MUL.
// Fixed the format string: it ended with a stray "\n\t", which
// produced a spurious blank continuation line in -XX:+PrintOptoAssembly
// output.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15522 
// Integer multiply-reduction over a 4-lane (128-bit) vector:
// dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
// INS copies the upper 64 bits (lanes 2,3) of src2 into tmp lane 0;
// the T2S MULV then forms {src2[0]*src2[2], src2[1]*src2[3]} pairwise,
// and the two partial products are extracted with UMOV and folded in
// with scalar MUL.
// Fixed the format string: it ended with a stray "\n\t", which
// produced a spurious blank continuation line in -XX:+PrintOptoAssembly
// output.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15547 
// Float add-reduction over a 2-lane (64-bit) vector:
// dst = src1 + src2[0] + src2[1], evaluated strictly in lane order
// because FP addition is not associative.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    // dst = src1 + src2[0] (scalar fadds reads lane 0 of src2)
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15567 
// Float add-reduction over a 4-lane (128-bit) vector:
// dst = src1 + src2[0] + src2[1] + src2[2] + src2[3], evaluated
// strictly in lane order because FP addition is not associative.
// Each lane 1..3 is moved to tmp lane 0 with INS and folded in with
// scalar FADDS.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    // dst = src1 + src2[0] (scalar fadds reads lane 0 of src2)
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15599 
// Float multiply-reduction over a 2-lane (64-bit) vector:
// dst = src1 * src2[0] * src2[1], evaluated strictly in lane order
// because FP multiplication is not associative.
// Fixed the format trailer: it previously said "add reduction4f"
// although this rule is the 2-lane multiply reduction.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    // dst = src1 * src2[0] (scalar fmuls reads lane 0 of src2)
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1]
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15619 
// Float multiply-reduction over a 4-lane (128-bit) vector:
// dst = src1 * src2[0] * src2[1] * src2[2] * src2[3], evaluated
// strictly in lane order because FP multiplication is not associative.
// Fixed the format trailer: it previously said "add reduction4f"
// although this rule is the multiply reduction.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    // dst = src1 * src2[0] (scalar fmuls reads lane 0 of src2)
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15651 
// Double add-reduction over a 2-lane (128-bit) vector:
// dst = src1 + src2[0] + src2[1], evaluated strictly in lane order
// because FP addition is not associative.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    // dst = src1 + src2[0] (scalar faddd reads lane 0 of src2)
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1]
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15671 
// Double multiply-reduction over a 2-lane (128-bit) vector:
// dst = src1 * src2[0] * src2[1], evaluated strictly in lane order
// because FP multiplication is not associative.
// Fixed the format trailer: it previously said "add reduction2d"
// although this rule is the multiply reduction.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    // dst = src1 * src2[0] (scalar fmuld reads lane 0 of src2)
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    // tmp[0] = src2[1]
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15691 
15692 // ====================VECTOR ARITHMETIC=======================================
15693 
15694 // --------------------------------- ADD --------------------------------------
15695 
// Vector add, 8 x byte (the length 4 case also lands here and uses
// the low half of the 64-bit vector).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 16 x byte.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 4 x short (the length 2 case also lands here).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 8 x short.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 x int.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 4 x int.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 x long.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 x float.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector add, 4 x float.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15823 
// Vector add, 2 x double.
// Added the length() == 2 predicate for consistency with the sibling
// 2D rules (vsub2D, vmul2D, vdiv2D), which all carry it; a vecX AddVD
// can only be 2 lanes, so this is behavior-neutral.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15836 
15837 // --------------------------------- SUB --------------------------------------
15838 
// Vector subtract, 8 x byte (the length 4 case also lands here).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 16 x byte.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 4 x short (the length 2 case also lands here).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 8 x short.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 x int.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 4 x int.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 x long.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 x float.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector subtract, 4 x float.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector subtract, 2 x double.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15980 
15981 // --------------------------------- MUL --------------------------------------
15982 
// Vector multiply, 4 x short (the length 2 case also lands here).
// Note: no MulVB/MulVL rules -- AArch64 MUL (vector) has no B-efficient
// or D-lane forms worth matching here.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 8 x short.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 x int.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 4 x int.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 x float.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector multiply, 4 x float.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector multiply, 2 x double.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16081 
16082 // --------------------------------- MLA --------------------------------------
16083 
// Integer multiply-accumulate (dst += src1 * src2), 4 x short
// (the length 2 case also lands here).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 8 x short.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, 2 x int.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 4 x int.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2
// Fused multiply-add (FMLA), 2 x float; only matched when UseFMA is
// on, since FMLA performs a single rounding.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2
// Fused multiply-add (FMLA), 4 x float; UseFMA-gated as above.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2
// Fused multiply-add (FMLA), 2 x double; UseFMA-gated as above.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16182 
16183 // --------------------------------- MLS --------------------------------------
16184 
// Integer multiply-subtract (dst -= src1 * src2), 4 x short
// (the length 2 case also lands here).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 8 x short.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, 2 x int.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 4 x int.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2
// Fused multiply-subtract (FMLS), 2 x float; matched when either
// multiplicand is negated (both spellings of the same FMA shape),
// UseFMA-gated for single rounding.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2
// Fused multiply-subtract (FMLS), 4 x float; as above.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2
// Fused multiply-subtract (FMLS), 2 x double; as above.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16286 
16287 // --------------------------------- DIV --------------------------------------
16288 
// Vector FP divide, 2 x float.  (Integer vector division has no
// AArch64 SIMD instruction, hence FP-only rules here.)
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector FP divide, 4 x float.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector FP divide, 2 x double.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16330 
16331 // --------------------------------- SQRT -------------------------------------
16332 
// Vector square root, 2 x double only (SqrtVD -> NEON fsqrt, 2D).
// Note: no single-float vector sqrt rule is present in this section.
16333 instruct vsqrt2D(vecX dst, vecX src)
16334 %{
16335   predicate(n->as_Vector()->length() == 2);
16336   match(Set dst (SqrtVD src));
16337   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
16338   ins_encode %{
16339     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
16340              as_FloatRegister($src$$reg));
16341   %}
16342   ins_pipe(vsqrt_fp128);
16343 %}
16344 
16345 // --------------------------------- ABS --------------------------------------
16346 
// Vector floating-point absolute value: AbsVF/AbsVD -> NEON fabs.
16347 instruct vabs2F(vecD dst, vecD src)
16348 %{
16349   predicate(n->as_Vector()->length() == 2);
16350   match(Set dst (AbsVF src));
16351   ins_cost(INSN_COST * 3);
16352   format %{ "fabs  $dst,$src\t# vector (2S)" %}
16353   ins_encode %{
16354     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
16355             as_FloatRegister($src$$reg));
16356   %}
16357   ins_pipe(vunop_fp64);
16358 %}
16359 
// 4 x float, 128-bit register.
16360 instruct vabs4F(vecX dst, vecX src)
16361 %{
16362   predicate(n->as_Vector()->length() == 4);
16363   match(Set dst (AbsVF src));
16364   ins_cost(INSN_COST * 3);
16365   format %{ "fabs  $dst,$src\t# vector (4S)" %}
16366   ins_encode %{
16367     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
16368             as_FloatRegister($src$$reg));
16369   %}
16370   ins_pipe(vunop_fp128);
16371 %}
16372 
// 2 x double, 128-bit register.
16373 instruct vabs2D(vecX dst, vecX src)
16374 %{
16375   predicate(n->as_Vector()->length() == 2);
16376   match(Set dst (AbsVD src));
16377   ins_cost(INSN_COST * 3);
16378   format %{ "fabs  $dst,$src\t# vector (2D)" %}
16379   ins_encode %{
16380     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
16381             as_FloatRegister($src$$reg));
16382   %}
16383   ins_pipe(vunop_fp128);
16384 %}
16385 
16386 // --------------------------------- NEG --------------------------------------
16387 
// Vector floating-point negation: NegVF/NegVD -> NEON fneg.
16388 instruct vneg2F(vecD dst, vecD src)
16389 %{
16390   predicate(n->as_Vector()->length() == 2);
16391   match(Set dst (NegVF src));
16392   ins_cost(INSN_COST * 3);
16393   format %{ "fneg  $dst,$src\t# vector (2S)" %}
16394   ins_encode %{
16395     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
16396             as_FloatRegister($src$$reg));
16397   %}
16398   ins_pipe(vunop_fp64);
16399 %}
16400 
// 4 x float, 128-bit register.
16401 instruct vneg4F(vecX dst, vecX src)
16402 %{
16403   predicate(n->as_Vector()->length() == 4);
16404   match(Set dst (NegVF src));
16405   ins_cost(INSN_COST * 3);
16406   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16407   ins_encode %{
16408     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16409             as_FloatRegister($src$$reg));
16410   %}
16411   ins_pipe(vunop_fp128);
16412 %}
16413 
// 2 x double, 128-bit register.
16414 instruct vneg2D(vecX dst, vecX src)
16415 %{
16416   predicate(n->as_Vector()->length() == 2);
16417   match(Set dst (NegVD src));
16418   ins_cost(INSN_COST * 3);
16419   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16420   ins_encode %{
16421     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16422             as_FloatRegister($src$$reg));
16423   %}
16424   ins_pipe(vunop_fp128);
16425 %}
16426 
16427 // --------------------------------- AND --------------------------------------
16428 
// Bitwise AND. AndV is element-size agnostic, so the predicate tests the
// total vector length in bytes: 4 or 8 bytes -> 64-bit (8B) form,
// 16 bytes -> 128-bit (16B) form.
16429 instruct vand8B(vecD dst, vecD src1, vecD src2)
16430 %{
16431   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16432             n->as_Vector()->length_in_bytes() == 8);
16433   match(Set dst (AndV src1 src2));
16434   ins_cost(INSN_COST);
16435   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16436   ins_encode %{
16437     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16438             as_FloatRegister($src1$$reg),
16439             as_FloatRegister($src2$$reg));
16440   %}
16441   ins_pipe(vlogical64);
16442 %}
16443 
16444 instruct vand16B(vecX dst, vecX src1, vecX src2)
16445 %{
16446   predicate(n->as_Vector()->length_in_bytes() == 16);
16447   match(Set dst (AndV src1 src2));
16448   ins_cost(INSN_COST);
16449   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16450   ins_encode %{
16451     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16452             as_FloatRegister($src1$$reg),
16453             as_FloatRegister($src2$$reg));
16454   %}
16455   ins_pipe(vlogical128);
16456 %}
16457 
16458 // --------------------------------- OR ---------------------------------------
16459 
// Bitwise OR, 64-bit (8B) form. OrV is element-size agnostic, so the
// predicate tests the total vector length in bytes (4 or 8).
// FIX: the format string previously said "and", which made disassembly /
// -XX:+PrintOptoAssembly output misleading — the rule matches OrV and the
// encoding emits orr. Continuation arguments realigned with the orr call.
16460 instruct vor8B(vecD dst, vecD src1, vecD src2)
16461 %{
16462   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16463             n->as_Vector()->length_in_bytes() == 8);
16464   match(Set dst (OrV src1 src2));
16465   ins_cost(INSN_COST);
16466   format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
16467   ins_encode %{
16468     __ orr(as_FloatRegister($dst$$reg), __ T8B,
16469            as_FloatRegister($src1$$reg),
16470            as_FloatRegister($src2$$reg));
16471   %}
16472   ins_pipe(vlogical64);
16473 %}
16474 
// Bitwise OR, 128-bit (16B) form.
16475 instruct vor16B(vecX dst, vecX src1, vecX src2)
16476 %{
16477   predicate(n->as_Vector()->length_in_bytes() == 16);
16478   match(Set dst (OrV src1 src2));
16479   ins_cost(INSN_COST);
16480   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
16481   ins_encode %{
16482     __ orr(as_FloatRegister($dst$$reg), __ T16B,
16483             as_FloatRegister($src1$$reg),
16484             as_FloatRegister($src2$$reg));
16485   %}
16486   ins_pipe(vlogical128);
16487 %}
16488 
16489 // --------------------------------- XOR --------------------------------------
16490 
// Bitwise XOR (NEON mnemonic is eor; the format text uses "xor" to match
// the ideal XorV node). Predicate tests total length in bytes, as for AND/OR.
16491 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16492 %{
16493   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16494             n->as_Vector()->length_in_bytes() == 8);
16495   match(Set dst (XorV src1 src2));
16496   ins_cost(INSN_COST);
16497   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16498   ins_encode %{
16499     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16500             as_FloatRegister($src1$$reg),
16501             as_FloatRegister($src2$$reg));
16502   %}
16503   ins_pipe(vlogical64);
16504 %}
16505 
16506 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16507 %{
16508   predicate(n->as_Vector()->length_in_bytes() == 16);
16509   match(Set dst (XorV src1 src2));
16510   ins_cost(INSN_COST);
16511   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16512   ins_encode %{
16513     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16514             as_FloatRegister($src1$$reg),
16515             as_FloatRegister($src2$$reg));
16516   %}
16517   ins_pipe(vlogical128);
16518 %}
16519 
16520 // ------------------------------ Shift ---------------------------------------
16521 
// Materialize a variable shift count: broadcast the GPR count into every
// byte lane of a 128-bit vector register (dup).
16522 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
16523   match(Set dst (LShiftCntV cnt));
16524   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
16525   ins_encode %{
16526     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16527   %}
16528   ins_pipe(vdup_reg_reg128);
16529 %}
16530 
16531 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// So the right-shift count is broadcast and then negated lane-wise; the
// consuming sshl/ushl rules below then perform the right shift.
16532 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
16533   match(Set dst (RShiftCntV cnt));
16534   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
16535   ins_encode %{
16536     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16537     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
16538   %}
16539   ins_pipe(vdup_reg_reg128);
16540 %}
16541 
// Variable byte shifts. sshl serves both LShiftVB and RShiftVB because a
// right-shift count has already been negated by vshiftcntR above (NEON
// sshl shifts right for negative per-lane counts). ushl likewise handles
// the unsigned right shift URShiftVB.
16542 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
16543   predicate(n->as_Vector()->length() == 4 ||
16544             n->as_Vector()->length() == 8);
16545   match(Set dst (LShiftVB src shift));
16546   match(Set dst (RShiftVB src shift));
16547   ins_cost(INSN_COST);
16548   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
16549   ins_encode %{
16550     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
16551             as_FloatRegister($src$$reg),
16552             as_FloatRegister($shift$$reg));
16553   %}
16554   ins_pipe(vshift64);
16555 %}
16556 
16557 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
16558   predicate(n->as_Vector()->length() == 16);
16559   match(Set dst (LShiftVB src shift));
16560   match(Set dst (RShiftVB src shift));
16561   ins_cost(INSN_COST);
16562   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
16563   ins_encode %{
16564     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
16565             as_FloatRegister($src$$reg),
16566             as_FloatRegister($shift$$reg));
16567   %}
16568   ins_pipe(vshift128);
16569 %}
16570 
// Unsigned (logical) right shift, variable count.
16571 instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
16572   predicate(n->as_Vector()->length() == 4 ||
16573             n->as_Vector()->length() == 8);
16574   match(Set dst (URShiftVB src shift));
16575   ins_cost(INSN_COST);
16576   format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
16577   ins_encode %{
16578     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
16579             as_FloatRegister($src$$reg),
16580             as_FloatRegister($shift$$reg));
16581   %}
16582   ins_pipe(vshift64);
16583 %}
16584 
16585 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
16586   predicate(n->as_Vector()->length() == 16);
16587   match(Set dst (URShiftVB src shift));
16588   ins_cost(INSN_COST);
16589   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
16590   ins_encode %{
16591     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
16592             as_FloatRegister($src$$reg),
16593             as_FloatRegister($shift$$reg));
16594   %}
16595   ins_pipe(vshift128);
16596 %}
16597 
// Immediate byte shifts. The hardware shl/ushr immediate fields cannot
// encode a shift >= the 8-bit element width, so:
//  - shl/ushr by >= 8 must yield all-zero lanes: done with eor dst,src,src;
//  - arithmetic sshr by >= 8 is clamped to 7, which replicates the sign bit
//    across the lane (the correct arithmetic-shift result).
16598 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
16599   predicate(n->as_Vector()->length() == 4 ||
16600             n->as_Vector()->length() == 8);
16601   match(Set dst (LShiftVB src shift));
16602   ins_cost(INSN_COST);
16603   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
16604   ins_encode %{
16605     int sh = (int)$shift$$constant;
16606     if (sh >= 8) {
16607       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16608              as_FloatRegister($src$$reg),
16609              as_FloatRegister($src$$reg));
16610     } else {
16611       __ shl(as_FloatRegister($dst$$reg), __ T8B,
16612              as_FloatRegister($src$$reg), sh);
16613     }
16614   %}
16615   ins_pipe(vshift64_imm);
16616 %}
16617 
16618 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
16619   predicate(n->as_Vector()->length() == 16);
16620   match(Set dst (LShiftVB src shift));
16621   ins_cost(INSN_COST);
16622   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
16623   ins_encode %{
16624     int sh = (int)$shift$$constant;
16625     if (sh >= 8) {
16626       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16627              as_FloatRegister($src$$reg),
16628              as_FloatRegister($src$$reg));
16629     } else {
16630       __ shl(as_FloatRegister($dst$$reg), __ T16B,
16631              as_FloatRegister($src$$reg), sh);
16632     }
16633   %}
16634   ins_pipe(vshift128_imm);
16635 %}
16636 
// Arithmetic (signed) right shift by immediate; count clamped to 7.
16637 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
16638   predicate(n->as_Vector()->length() == 4 ||
16639             n->as_Vector()->length() == 8);
16640   match(Set dst (RShiftVB src shift));
16641   ins_cost(INSN_COST);
16642   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
16643   ins_encode %{
16644     int sh = (int)$shift$$constant;
16645     if (sh >= 8) sh = 7;
16646     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
16647            as_FloatRegister($src$$reg), sh);
16648   %}
16649   ins_pipe(vshift64_imm);
16650 %}
16651 
16652 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
16653   predicate(n->as_Vector()->length() == 16);
16654   match(Set dst (RShiftVB src shift));
16655   ins_cost(INSN_COST);
16656   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
16657   ins_encode %{
16658     int sh = (int)$shift$$constant;
16659     if (sh >= 8) sh = 7;
16660     __ sshr(as_FloatRegister($dst$$reg), __ T16B,
16661            as_FloatRegister($src$$reg), sh);
16662   %}
16663   ins_pipe(vshift128_imm);
16664 %}
16665 
// Logical (unsigned) right shift by immediate; >= 8 zeroes the lanes.
16666 instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
16667   predicate(n->as_Vector()->length() == 4 ||
16668             n->as_Vector()->length() == 8);
16669   match(Set dst (URShiftVB src shift));
16670   ins_cost(INSN_COST);
16671   format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
16672   ins_encode %{
16673     int sh = (int)$shift$$constant;
16674     if (sh >= 8) {
16675       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16676              as_FloatRegister($src$$reg),
16677              as_FloatRegister($src$$reg));
16678     } else {
16679       __ ushr(as_FloatRegister($dst$$reg), __ T8B,
16680              as_FloatRegister($src$$reg), sh);
16681     }
16682   %}
16683   ins_pipe(vshift64_imm);
16684 %}
16685 
16686 instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
16687   predicate(n->as_Vector()->length() == 16);
16688   match(Set dst (URShiftVB src shift));
16689   ins_cost(INSN_COST);
16690   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
16691   ins_encode %{
16692     int sh = (int)$shift$$constant;
16693     if (sh >= 8) {
16694       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16695              as_FloatRegister($src$$reg),
16696              as_FloatRegister($src$$reg));
16697     } else {
16698       __ ushr(as_FloatRegister($dst$$reg), __ T16B,
16699              as_FloatRegister($src$$reg), sh);
16700     }
16701   %}
16702   ins_pipe(vshift128_imm);
16703 %}
16704 
// 16-bit (short, H) element shifts. Same scheme as the byte forms:
// variable counts use sshl/ushl with a (possibly negated) per-lane count;
// immediate forms guard against counts >= the 16-bit element width
// (zero via eor for shl/ushr, clamp to 15 for sshr).
16705 instruct vsll4S(vecD dst, vecD src, vecX shift) %{
16706   predicate(n->as_Vector()->length() == 2 ||
16707             n->as_Vector()->length() == 4);
16708   match(Set dst (LShiftVS src shift));
16709   match(Set dst (RShiftVS src shift));
16710   ins_cost(INSN_COST);
16711   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
16712   ins_encode %{
16713     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
16714             as_FloatRegister($src$$reg),
16715             as_FloatRegister($shift$$reg));
16716   %}
16717   ins_pipe(vshift64);
16718 %}
16719 
16720 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
16721   predicate(n->as_Vector()->length() == 8);
16722   match(Set dst (LShiftVS src shift));
16723   match(Set dst (RShiftVS src shift));
16724   ins_cost(INSN_COST);
16725   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
16726   ins_encode %{
16727     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
16728             as_FloatRegister($src$$reg),
16729             as_FloatRegister($shift$$reg));
16730   %}
16731   ins_pipe(vshift128);
16732 %}
16733 
// Unsigned (logical) right shift, variable count.
16734 instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
16735   predicate(n->as_Vector()->length() == 2 ||
16736             n->as_Vector()->length() == 4);
16737   match(Set dst (URShiftVS src shift));
16738   ins_cost(INSN_COST);
16739   format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
16740   ins_encode %{
16741     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
16742             as_FloatRegister($src$$reg),
16743             as_FloatRegister($shift$$reg));
16744   %}
16745   ins_pipe(vshift64);
16746 %}
16747 
16748 instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
16749   predicate(n->as_Vector()->length() == 8);
16750   match(Set dst (URShiftVS src shift));
16751   ins_cost(INSN_COST);
16752   format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
16753   ins_encode %{
16754     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
16755             as_FloatRegister($src$$reg),
16756             as_FloatRegister($shift$$reg));
16757   %}
16758   ins_pipe(vshift128);
16759 %}
16760 
// Immediate left shift; >= 16 zeroes the lanes (eor dst,src,src).
16761 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
16762   predicate(n->as_Vector()->length() == 2 ||
16763             n->as_Vector()->length() == 4);
16764   match(Set dst (LShiftVS src shift));
16765   ins_cost(INSN_COST);
16766   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
16767   ins_encode %{
16768     int sh = (int)$shift$$constant;
16769     if (sh >= 16) {
16770       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16771              as_FloatRegister($src$$reg),
16772              as_FloatRegister($src$$reg));
16773     } else {
16774       __ shl(as_FloatRegister($dst$$reg), __ T4H,
16775              as_FloatRegister($src$$reg), sh);
16776     }
16777   %}
16778   ins_pipe(vshift64_imm);
16779 %}
16780 
16781 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
16782   predicate(n->as_Vector()->length() == 8);
16783   match(Set dst (LShiftVS src shift));
16784   ins_cost(INSN_COST);
16785   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
16786   ins_encode %{
16787     int sh = (int)$shift$$constant;
16788     if (sh >= 16) {
16789       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16790              as_FloatRegister($src$$reg),
16791              as_FloatRegister($src$$reg));
16792     } else {
16793       __ shl(as_FloatRegister($dst$$reg), __ T8H,
16794              as_FloatRegister($src$$reg), sh);
16795     }
16796   %}
16797   ins_pipe(vshift128_imm);
16798 %}
16799 
// Arithmetic right shift by immediate; count clamped to 15.
16800 instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
16801   predicate(n->as_Vector()->length() == 2 ||
16802             n->as_Vector()->length() == 4);
16803   match(Set dst (RShiftVS src shift));
16804   ins_cost(INSN_COST);
16805   format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
16806   ins_encode %{
16807     int sh = (int)$shift$$constant;
16808     if (sh >= 16) sh = 15;
16809     __ sshr(as_FloatRegister($dst$$reg), __ T4H,
16810            as_FloatRegister($src$$reg), sh);
16811   %}
16812   ins_pipe(vshift64_imm);
16813 %}
16814 
16815 instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
16816   predicate(n->as_Vector()->length() == 8);
16817   match(Set dst (RShiftVS src shift));
16818   ins_cost(INSN_COST);
16819   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
16820   ins_encode %{
16821     int sh = (int)$shift$$constant;
16822     if (sh >= 16) sh = 15;
16823     __ sshr(as_FloatRegister($dst$$reg), __ T8H,
16824            as_FloatRegister($src$$reg), sh);
16825   %}
16826   ins_pipe(vshift128_imm);
16827 %}
16828 
// Logical right shift by immediate; >= 16 zeroes the lanes.
16829 instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
16830   predicate(n->as_Vector()->length() == 2 ||
16831             n->as_Vector()->length() == 4);
16832   match(Set dst (URShiftVS src shift));
16833   ins_cost(INSN_COST);
16834   format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
16835   ins_encode %{
16836     int sh = (int)$shift$$constant;
16837     if (sh >= 16) {
16838       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16839              as_FloatRegister($src$$reg),
16840              as_FloatRegister($src$$reg));
16841     } else {
16842       __ ushr(as_FloatRegister($dst$$reg), __ T4H,
16843              as_FloatRegister($src$$reg), sh);
16844     }
16845   %}
16846   ins_pipe(vshift64_imm);
16847 %}
16848 
16849 instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
16850   predicate(n->as_Vector()->length() == 8);
16851   match(Set dst (URShiftVS src shift));
16852   ins_cost(INSN_COST);
16853   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
16854   ins_encode %{
16855     int sh = (int)$shift$$constant;
16856     if (sh >= 16) {
16857       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16858              as_FloatRegister($src$$reg),
16859              as_FloatRegister($src$$reg));
16860     } else {
16861       __ ushr(as_FloatRegister($dst$$reg), __ T8H,
16862              as_FloatRegister($src$$reg), sh);
16863     }
16864   %}
16865   ins_pipe(vshift128_imm);
16866 %}
16867 
// 32-bit (int, S) element shifts. Variable counts via sshl/ushl as above.
// NOTE(review): unlike the B/H immediate forms, the int immediate rules
// below have no >= 32 guard — presumably the shift constant is already
// masked to the element width before reaching the matcher; confirm before
// relying on out-of-range counts here.
16868 instruct vsll2I(vecD dst, vecD src, vecX shift) %{
16869   predicate(n->as_Vector()->length() == 2);
16870   match(Set dst (LShiftVI src shift));
16871   match(Set dst (RShiftVI src shift));
16872   ins_cost(INSN_COST);
16873   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
16874   ins_encode %{
16875     __ sshl(as_FloatRegister($dst$$reg), __ T2S,
16876             as_FloatRegister($src$$reg),
16877             as_FloatRegister($shift$$reg));
16878   %}
16879   ins_pipe(vshift64);
16880 %}
16881 
16882 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
16883   predicate(n->as_Vector()->length() == 4);
16884   match(Set dst (LShiftVI src shift));
16885   match(Set dst (RShiftVI src shift));
16886   ins_cost(INSN_COST);
16887   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
16888   ins_encode %{
16889     __ sshl(as_FloatRegister($dst$$reg), __ T4S,
16890             as_FloatRegister($src$$reg),
16891             as_FloatRegister($shift$$reg));
16892   %}
16893   ins_pipe(vshift128);
16894 %}
16895 
16896 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
16897   predicate(n->as_Vector()->length() == 2);
16898   match(Set dst (URShiftVI src shift));
16899   ins_cost(INSN_COST);
16900   format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
16901   ins_encode %{
16902     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
16903             as_FloatRegister($src$$reg),
16904             as_FloatRegister($shift$$reg));
16905   %}
16906   ins_pipe(vshift64);
16907 %}
16908 
16909 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
16910   predicate(n->as_Vector()->length() == 4);
16911   match(Set dst (URShiftVI src shift));
16912   ins_cost(INSN_COST);
16913   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
16914   ins_encode %{
16915     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
16916             as_FloatRegister($src$$reg),
16917             as_FloatRegister($shift$$reg));
16918   %}
16919   ins_pipe(vshift128);
16920 %}
16921 
// Immediate forms: emit shl / sshr / ushr directly with the constant.
16922 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
16923   predicate(n->as_Vector()->length() == 2);
16924   match(Set dst (LShiftVI src shift));
16925   ins_cost(INSN_COST);
16926   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
16927   ins_encode %{
16928     __ shl(as_FloatRegister($dst$$reg), __ T2S,
16929            as_FloatRegister($src$$reg),
16930            (int)$shift$$constant);
16931   %}
16932   ins_pipe(vshift64_imm);
16933 %}
16934 
16935 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
16936   predicate(n->as_Vector()->length() == 4);
16937   match(Set dst (LShiftVI src shift));
16938   ins_cost(INSN_COST);
16939   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
16940   ins_encode %{
16941     __ shl(as_FloatRegister($dst$$reg), __ T4S,
16942            as_FloatRegister($src$$reg),
16943            (int)$shift$$constant);
16944   %}
16945   ins_pipe(vshift128_imm);
16946 %}
16947 
16948 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
16949   predicate(n->as_Vector()->length() == 2);
16950   match(Set dst (RShiftVI src shift));
16951   ins_cost(INSN_COST);
16952   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
16953   ins_encode %{
16954     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
16955             as_FloatRegister($src$$reg),
16956             (int)$shift$$constant);
16957   %}
16958   ins_pipe(vshift64_imm);
16959 %}
16960 
16961 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
16962   predicate(n->as_Vector()->length() == 4);
16963   match(Set dst (RShiftVI src shift));
16964   ins_cost(INSN_COST);
16965   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
16966   ins_encode %{
16967     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
16968             as_FloatRegister($src$$reg),
16969             (int)$shift$$constant);
16970   %}
16971   ins_pipe(vshift128_imm);
16972 %}
16973 
16974 instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
16975   predicate(n->as_Vector()->length() == 2);
16976   match(Set dst (URShiftVI src shift));
16977   ins_cost(INSN_COST);
16978   format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
16979   ins_encode %{
16980     __ ushr(as_FloatRegister($dst$$reg), __ T2S,
16981             as_FloatRegister($src$$reg),
16982             (int)$shift$$constant);
16983   %}
16984   ins_pipe(vshift64_imm);
16985 %}
16986 
16987 instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
16988   predicate(n->as_Vector()->length() == 4);
16989   match(Set dst (URShiftVI src shift));
16990   ins_cost(INSN_COST);
16991   format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
16992   ins_encode %{
16993     __ ushr(as_FloatRegister($dst$$reg), __ T4S,
16994             as_FloatRegister($src$$reg),
16995             (int)$shift$$constant);
16996   %}
16997   ins_pipe(vshift128_imm);
16998 %}
16999 
// 64-bit (long, D) element shifts, 2 lanes in a 128-bit register.
// Variable counts via sshl/ushl (right-shift counts pre-negated by
// vshiftcntR). NOTE(review): as with the int forms, the immediate rules
// have no >= 64 guard — presumably the constant is pre-masked; confirm.
17000 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
17001   predicate(n->as_Vector()->length() == 2);
17002   match(Set dst (LShiftVL src shift));
17003   match(Set dst (RShiftVL src shift));
17004   ins_cost(INSN_COST);
17005   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
17006   ins_encode %{
17007     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
17008             as_FloatRegister($src$$reg),
17009             as_FloatRegister($shift$$reg));
17010   %}
17011   ins_pipe(vshift128);
17012 %}
17013 
17014 instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
17015   predicate(n->as_Vector()->length() == 2);
17016   match(Set dst (URShiftVL src shift));
17017   ins_cost(INSN_COST);
17018   format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
17019   ins_encode %{
17020     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
17021             as_FloatRegister($src$$reg),
17022             as_FloatRegister($shift$$reg));
17023   %}
17024   ins_pipe(vshift128);
17025 %}
17026 
17027 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
17028   predicate(n->as_Vector()->length() == 2);
17029   match(Set dst (LShiftVL src shift));
17030   ins_cost(INSN_COST);
17031   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
17032   ins_encode %{
17033     __ shl(as_FloatRegister($dst$$reg), __ T2D,
17034            as_FloatRegister($src$$reg),
17035            (int)$shift$$constant);
17036   %}
17037   ins_pipe(vshift128_imm);
17038 %}
17039 
17040 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
17041   predicate(n->as_Vector()->length() == 2);
17042   match(Set dst (RShiftVL src shift));
17043   ins_cost(INSN_COST);
17044   format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
17045   ins_encode %{
17046     __ sshr(as_FloatRegister($dst$$reg), __ T2D,
17047             as_FloatRegister($src$$reg),
17048             (int)$shift$$constant);
17049   %}
17050   ins_pipe(vshift128_imm);
17051 %}
17052 
17053 instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
17054   predicate(n->as_Vector()->length() == 2);
17055   match(Set dst (URShiftVL src shift));
17056   ins_cost(INSN_COST);
17057   format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
17058   ins_encode %{
17059     __ ushr(as_FloatRegister($dst$$reg), __ T2D,
17060             as_FloatRegister($src$$reg),
17061             (int)$shift$$constant);
17062   %}
17063   ins_pipe(vshift128_imm);
17064 %}
17065 
17066 //----------PEEPHOLE RULES-----------------------------------------------------
17067 // These must follow all instruction definitions as they use the names
17068 // defined in the instructions definitions.
17069 //
17070 // peepmatch ( root_instr_name [preceding_instruction]* );
17071 //
17072 // peepconstraint %{
17073 // (instruction_number.operand_name relational_op instruction_number.operand_name
17074 //  [, ...] );
17075 // // instruction numbers are zero-based using left to right order in peepmatch
17076 //
17077 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17078 // // provide an instruction_number.operand_name for each operand that appears
17079 // // in the replacement instruction's match rule
17080 //
17081 // ---------VM FLAGS---------------------------------------------------------
17082 //
17083 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17084 //
17085 // Each peephole rule is given an identifying number starting with zero and
17086 // increasing by one in the order seen by the parser.  An individual peephole
17087 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17088 // on the command-line.
17089 //
17090 // ---------CURRENT LIMITATIONS----------------------------------------------
17091 //
17092 // Only match adjacent instructions in same basic block
17093 // Only equality constraints
17094 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17095 // Only one replacement instruction
17096 //
17097 // ---------EXAMPLE----------------------------------------------------------
17098 //
17099 // // pertinent parts of existing instructions in architecture description
17100 // instruct movI(iRegINoSp dst, iRegI src)
17101 // %{
17102 //   match(Set dst (CopyI src));
17103 // %}
17104 //
17105 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17106 // %{
17107 //   match(Set dst (AddI dst src));
17108 //   effect(KILL cr);
17109 // %}
17110 //
17111 // // Change (inc mov) to lea
17112 // peephole %{
17113 //   // increment preceded by register-register move
17114 //   peepmatch ( incI_iReg movI );
17115 //   // require that the destination register of the increment
17116 //   // match the destination register of the move
17117 //   peepconstraint ( 0.dst == 1.dst );
17118 //   // construct a replacement instruction that sets
17119 //   // the destination to ( move's source register + one )
17120 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17121 // %}
17122 //
17123 
17124 // Implementation no longer uses movX instructions since
17125 // machine-independent system no longer uses CopyX nodes.
17126 //
17127 // peephole
17128 // %{
17129 //   peepmatch (incI_iReg movI);
17130 //   peepconstraint (0.dst == 1.dst);
17131 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17132 // %}
17133 
17134 // peephole
17135 // %{
17136 //   peepmatch (decI_iReg movI);
17137 //   peepconstraint (0.dst == 1.dst);
17138 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17139 // %}
17140 
17141 // peephole
17142 // %{
17143 //   peepmatch (addI_iReg_imm movI);
17144 //   peepconstraint (0.dst == 1.dst);
17145 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17146 // %}
17147 
17148 // peephole
17149 // %{
17150 //   peepmatch (incL_iReg movL);
17151 //   peepconstraint (0.dst == 1.dst);
17152 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17153 // %}
17154 
17155 // peephole
17156 // %{
17157 //   peepmatch (decL_iReg movL);
17158 //   peepconstraint (0.dst == 1.dst);
17159 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17160 // %}
17161 
17162 // peephole
17163 // %{
17164 //   peepmatch (addL_iReg_imm movL);
17165 //   peepconstraint (0.dst == 1.dst);
17166 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17167 // %}
17168 
17169 // peephole
17170 // %{
17171 //   peepmatch (addP_iReg_imm movP);
17172 //   peepconstraint (0.dst == 1.dst);
17173 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17174 // %}
17175 
17176 // // Change load of spilled value to only a spill
17177 // instruct storeI(memory mem, iRegI src)
17178 // %{
17179 //   match(Set mem (StoreI mem src));
17180 // %}
17181 //
17182 // instruct loadI(iRegINoSp dst, memory mem)
17183 // %{
17184 //   match(Set dst (LoadI mem));
17185 // %}
17186 //
17187 
17188 //----------SMARTSPILL RULES---------------------------------------------------
17189 // These must follow all instruction definitions as they use the names
17190 // defined in the instructions definitions.
17191 
17192 // Local Variables:
17193 // mode: c++
17194 // End:
--- EOF ---