1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// as regards Java usage. We don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save). Float registers
// v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
 328 // the AArch64 CSPR status flag register is not directly acessible as
 329 // instruction operand. the FPSR status flag register is a system
 330 // register which can be written/read using MSR/MRS but again does not
 331 // appear as an operand (a code identifying the FSPR occurs as an
 332 // immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
//
// A 64-bit vector occupies the same two-slot (V<n>, V<n>_H) pairs as
// the double register class above.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// A 128-bit vector occupies four 32-bit allocator slots per register:
// V<n>, V<n>_H, V<n>_J and V<n>_K.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the (V0, V0_H) slot pair is listed, unlike
// vectorx_reg which uses four slots per 128-bit register -- confirm
// whether the _J/_K slots are deliberately omitted from these
// single-register classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost; a huge cost appears to be a way of saying "don't do
// something".
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches are twice as expensive as a plain register operation.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed the same as branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are by far the most expensive.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 #if INCLUDE_SHENANDOAHGC
1004 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
1005 #endif
1006 
// Call trampoline stub sizing. Both queries report zero because this
// platform emits calls without trampoline stubs.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1024 
// Emission and sizing of the exception and deoptimization handler
// stubs appended to compiled methods.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch to the shared stub
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): four instruction slots are reserved -- presumably
    // adr plus a far branch that may expand to several instructions;
    // confirm against emit_deopt_handler.
    return 4 * NativeInstruction::instruction_size;
  }
};
1041 
 // returns true if opcode names a CompareAndSwap-style node; with
 // maybe_volatile set, the exchange/weak flavours also count
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1063 %}
1064 
1065 source %{
1066 
  // Optimization of volatile gets and puts
1068   // -------------------------------------
1069   //
1070   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1071   // use to implement volatile reads and writes. For a volatile read
1072   // we simply need
1073   //
1074   //   ldar<x>
1075   //
1076   // and for a volatile write we need
1077   //
1078   //   stlr<x>
1079   //
1080   // Alternatively, we can implement them by pairing a normal
1081   // load/store with a memory barrier. For a volatile read we need
1082   //
1083   //   ldr<x>
1084   //   dmb ishld
1085   //
1086   // for a volatile write
1087   //
1088   //   dmb ish
1089   //   str<x>
1090   //   dmb ish
1091   //
1092   // We can also use ldaxr and stlxr to implement compare and swap CAS
1093   // sequences. These are normally translated to an instruction
1094   // sequence like the following
1095   //
1096   //   dmb      ish
1097   // retry:
1098   //   ldxr<x>   rval raddr
1099   //   cmp       rval rold
1100   //   b.ne done
1101   //   stlxr<x>  rval, rnew, rold
1102   //   cbnz      rval retry
1103   // done:
1104   //   cset      r0, eq
1105   //   dmb ishld
1106   //
1107   // Note that the exclusive store is already using an stlxr
1108   // instruction. That is required to ensure visibility to other
1109   // threads of the exclusive write (assuming it succeeds) before that
1110   // of any subsequent writes.
1111   //
1112   // The following instruction sequence is an improvement on the above
1113   //
1114   // retry:
1115   //   ldaxr<x>  rval raddr
1116   //   cmp       rval rold
1117   //   b.ne done
1118   //   stlxr<x>  rval, rnew, rold
1119   //   cbnz      rval retry
1120   // done:
1121   //   cset      r0, eq
1122   //
1123   // We don't need the leading dmb ish since the stlxr guarantees
1124   // visibility of prior writes in the case that the swap is
1125   // successful. Crucially we don't have to worry about the case where
1126   // the swap is not successful since no valid program should be
1127   // relying on visibility of prior changes by the attempting thread
1128   // in the case where the CAS fails.
1129   //
1130   // Similarly, we don't need the trailing dmb ishld if we substitute
1131   // an ldaxr instruction since that will provide all the guarantees we
1132   // require regarding observation of changes made by other threads
1133   // before any change to the CAS address observed by the load.
1134   //
1135   // In order to generate the desired instruction sequence we need to
1136   // be able to identify specific 'signature' ideal graph node
1137   // sequences which i) occur as a translation of a volatile reads or
1138   // writes or CAS operations and ii) do not occur through any other
1139   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1141   // sequences to the desired machine code sequences. Selection of the
1142   // alternative rules can be implemented by predicates which identify
1143   // the relevant node sequences.
1144   //
1145   // The ideal graph generator translates a volatile read to the node
1146   // sequence
1147   //
1148   //   LoadX[mo_acquire]
1149   //   MemBarAcquire
1150   //
1151   // As a special case when using the compressed oops optimization we
1152   // may also see this variant
1153   //
1154   //   LoadN[mo_acquire]
1155   //   DecodeN
1156   //   MemBarAcquire
1157   //
1158   // A volatile write is translated to the node sequence
1159   //
1160   //   MemBarRelease
1161   //   StoreX[mo_release] {CardMark}-optional
1162   //   MemBarVolatile
1163   //
1164   // n.b. the above node patterns are generated with a strict
1165   // 'signature' configuration of input and output dependencies (see
1166   // the predicates below for exact details). The card mark may be as
1167   // simple as a few extra nodes or, in a few GC configurations, may
1168   // include more complex control flow between the leading and
1169   // trailing memory barriers. However, whatever the card mark
1170   // configuration these signatures are unique to translated volatile
1171   // reads/stores -- they will not appear as a result of any other
1172   // bytecode translation or inlining nor as a consequence of
1173   // optimizing transforms.
1174   //
1175   // We also want to catch inlined unsafe volatile gets and puts and
1176   // be able to implement them using either ldar<x>/stlr<x> or some
1177   // combination of ldr<x>/stlr<x> and dmb instructions.
1178   //
1179   // Inlined unsafe volatiles puts manifest as a minor variant of the
1180   // normal volatile put node sequence containing an extra cpuorder
1181   // membar
1182   //
1183   //   MemBarRelease
1184   //   MemBarCPUOrder
1185   //   StoreX[mo_release] {CardMark}-optional
1186   //   MemBarCPUOrder
1187   //   MemBarVolatile
1188   //
1189   // n.b. as an aside, a cpuorder membar is not itself subject to
1190   // matching and translation by adlc rules.  However, the rule
1191   // predicates need to detect its presence in order to correctly
1192   // select the desired adlc rules.
1193   //
1194   // Inlined unsafe volatile gets manifest as a slightly different
1195   // node sequence to a normal volatile get because of the
1196   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1199   // present
1200   //
1201   //   MemBarCPUOrder
1202   //        ||       \\
1203   //   MemBarCPUOrder LoadX[mo_acquire]
1204   //        ||            |
1205   //        ||       {DecodeN} optional
1206   //        ||       /
1207   //     MemBarAcquire
1208   //
1209   // In this case the acquire membar does not directly depend on the
1210   // load. However, we can be sure that the load is generated from an
1211   // inlined unsafe volatile get if we see it dependent on this unique
1212   // sequence of membar nodes. Similarly, given an acquire membar we
1213   // can know that it was added because of an inlined unsafe volatile
1214   // get if it is fed and feeds a cpuorder membar and if its feed
1215   // membar also feeds an acquiring load.
1216   //
1217   // Finally an inlined (Unsafe) CAS operation is translated to the
1218   // following ideal graph
1219   //
1220   //   MemBarRelease
1221   //   MemBarCPUOrder
1222   //   CompareAndSwapX {CardMark}-optional
1223   //   MemBarCPUOrder
1224   //   MemBarAcquire
1225   //
1226   // So, where we can identify these volatile read and write
1227   // signatures we can choose to plant either of the above two code
1228   // sequences. For a volatile read we can simply plant a normal
1229   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1230   // also choose to inhibit translation of the MemBarAcquire and
1231   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1232   //
1233   // When we recognise a volatile store signature we can choose to
1234   // plant at a dmb ish as a translation for the MemBarRelease, a
1235   // normal str<x> and then a dmb ish for the MemBarVolatile.
1236   // Alternatively, we can inhibit translation of the MemBarRelease
1237   // and MemBarVolatile and instead plant a simple stlr<x>
1238   // instruction.
1239   //
1240   // when we recognise a CAS signature we can choose to plant a dmb
1241   // ish as a translation for the MemBarRelease, the conventional
1242   // macro-instruction sequence for the CompareAndSwap node (which
1243   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1244   // Alternatively, we can elide generation of the dmb instructions
1245   // and plant the alternative CompareAndSwap macro-instruction
1246   // sequence (which uses ldaxr<x>).
1247   //
1248   // Of course, the above only applies when we see these signature
1249   // configurations. We still want to plant dmb instructions in any
1250   // other cases where we may see a MemBarAcquire, MemBarRelease or
1251   // MemBarVolatile. For example, at the end of a constructor which
1252   // writes final/volatile fields we will see a MemBarRelease
1253   // instruction and this needs a 'dmb ish' lest we risk the
1254   // constructed object being visible without making the
1255   // final/volatile field writes visible.
1256   //
1257   // n.b. the translation rules below which rely on detection of the
1258   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1259   // If we see anything other than the signature configurations we
1260   // always just translate the loads and stores to ldr<x> and str<x>
1261   // and translate acquire, release and volatile membars to the
1262   // relevant dmb instructions.
1263   //
1264 
1265   // is_CAS(int opcode, bool maybe_volatile)
1266   //
1267   // return true if opcode is one of the possible CompareAndSwapX
1268   // values otherwise false.
1269 
// maybe_volatile is returned for the exchange / weak / Shenandoah
// flavours: they count as CAS only when the caller is asking about
// possibly-volatile operations (callers pass true or false
// accordingly; see needs_acquiring_load_exclusive).
bool is_CAS(int opcode, bool maybe_volatile)
{
  switch(opcode) {
    // We handle these
  case Op_CompareAndSwapI:
  case Op_CompareAndSwapL:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN:
  case Op_ShenandoahCompareAndSwapP:
  case Op_ShenandoahCompareAndSwapN:
  case Op_CompareAndSwapB:
  case Op_CompareAndSwapS:
  case Op_GetAndSetI:
  case Op_GetAndSetL:
  case Op_GetAndSetP:
  case Op_GetAndSetN:
  case Op_GetAndAddI:
  case Op_GetAndAddL:
    return true;
    // these flavours only qualify when the caller allows operations
    // that may carry volatile semantics
  case Op_CompareAndExchangeI:
  case Op_CompareAndExchangeN:
  case Op_CompareAndExchangeB:
  case Op_CompareAndExchangeS:
  case Op_CompareAndExchangeL:
  case Op_CompareAndExchangeP:
  case Op_WeakCompareAndSwapB:
  case Op_WeakCompareAndSwapS:
  case Op_WeakCompareAndSwapI:
  case Op_WeakCompareAndSwapL:
  case Op_WeakCompareAndSwapP:
  case Op_WeakCompareAndSwapN:
  case Op_ShenandoahWeakCompareAndSwapP:
  case Op_ShenandoahWeakCompareAndSwapN:
  case Op_ShenandoahCompareAndExchangeP:
  case Op_ShenandoahCompareAndExchangeN:
    return maybe_volatile;
  default:
    return false;
  }
}
1310 
1311   // helper to determine the maximum number of Phi nodes we may need to
1312   // traverse when searching from a card mark membar for the merge mem
1313   // feeding a trailing membar or vice versa
1314 
1315 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1316 
// returns true if the MemBarAcquire can be elided because the
// preceding load will be emitted as ldar<x> (or the CAS with
// ldaxr<x>) which already provides the acquire semantics
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a membar marked as trailing a volatile load is matched together
  // with that load, which is emitted as ldar<x>
  if (mb->trailing_load()) {
    return true;
  }

  // a membar trailing a load-store is elided only when the load-store
  // is one of the CAS flavours translated with ldaxr/stlxr
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1340 
1341 bool needs_acquiring_load(const Node *n)
1342 {
1343   assert(n->is_Load(), "expecting a load");
1344   if (UseBarriersForVolatile) {
1345     // we use a normal load and a dmb
1346     return false;
1347   }
1348 
1349   LoadNode *ld = n->as_Load();
1350 
1351   return ld->is_acquire();
1352 }
1353 
// returns true if the leading MemBarRelease can be elided because the
// paired store will be emitted as stlr<x> (or the CAS with stlxr<x>)
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // only a membar leading a volatile store/CAS signature may be elided
  MemBarNode *barrier = n->as_MemBar();
  if (!barrier->leading()) {
    return false;
  } else {
    Node* trailing = barrier->trailing_membar();
    MemBarNode* trailing_mb = trailing->as_MemBar();
    assert(trailing_mb->trailing(), "Not a trailing membar?");
    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");

    // the trailing membar's Precedent input is the releasing store or
    // the CAS load-store node
    Node* mem = trailing_mb->in(MemBarNode::Precedent);
    if (mem->is_Store()) {
      // volatile store signature: the store becomes stlr<x>
      assert(mem->as_Store()->is_release(), "");
      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
      return true;
    } else {
      // CAS signature: elide only for opcodes translated with
      // ldaxr/stlxr
      assert(mem->is_LoadStore(), "");
      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
      return is_CAS(mem->Opcode(), true);
    }
  }
  // not reached: both branches above return
  return false;
}
1387 
// returns true if the trailing MemBarVolatile of a volatile store
// signature can be elided (the store itself is emitted as stlr<x>)
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // trailing_store() identifies this membar as the trailing element
  // of a volatile store signature
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // cross-check the leading/trailing membar pairing is consistent
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1411 
1412 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1413 
1414 bool needs_releasing_store(const Node *n)
1415 {
1416   // assert n->is_Store();
1417   if (UseBarriersForVolatile) {
1418     // we use a normal store and dmb combination
1419     return false;
1420   }
1421 
1422   StoreNode *st = n->as_Store();
1423 
1424   return st->trailing_membar() != NULL;
1425 }
1426 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmbs are planted instead, so a plain ldxr suffices
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // a strong CAS is always part of a release/acquire signature, so
    // it must carry a trailing membar
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // for the exchange/weak flavours, use ldaxr only when a trailing
    // membar marks the operation as volatile
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1448 
1449 // predicate controlling translation of StoreCM
1450 //
1451 // returns true if a StoreStore must precede the card write otherwise
1452 // false
1453 
1454 bool unnecessary_storestore(const Node *storecm)
1455 {
1456   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1457 
1458   // we need to generate a dmb ishst between an object put and the
1459   // associated card mark when we are using CMS without conditional
1460   // card marking
1461 
1462   if (UseConcMarkSweepGC && !UseCondCardMark) {
1463     return false;
1464   }
1465 
1466   // a storestore is unnecesary in all other cases
1467 
1468   return true;
1469 }
1470 
1471 
1472 #define __ _masm.
1473 
1474 // advance declarations for helper functions to convert register
1475 // indices to register objects
1476 
1477 // the ad file has to provide implementations of certain methods
1478 // expected by the generic code
1479 //
1480 // REQUIRED FUNCTIONALITY
1481 
1482 //=============================================================================
1483 
1484 // !!!!! Special hack to get all types of calls to specify the byte offset
1485 //       from the start of the call to the point where the return address
1486 //       will point.
1487 
1488 int MachCallStaticJavaNode::ret_addr_offset()
1489 {
1490   // call should be a simple bl
1491   int off = 4;
1492   return off;
1493 }
1494 
1495 int MachCallDynamicJavaNode::ret_addr_offset()
1496 {
1497   return 16; // movz, movk, movk, bl
1498 }
1499 
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blrt rscratch1
  // a non-NULL blob means the target lives in the code cache and is
  // reached with a far_call
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}
1516 
1517 // Indicate if the safepoint node needs the polling page as an input
1518 
1519 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1521 // instruction itself. so we cannot plant a mov of the safepoint poll
1522 // address followed by a load. setting this to true means the mov is
1523 // scheduled as a prior instruction. that's better for scheduling
1524 // anyway.
1525 
1526 bool SafePointNode::needs_polling_address_input()
1527 {
1528   return true;
1529 }
1530 
1531 //=============================================================================
1532 
#ifndef PRODUCT
// print pseudo-assembly for the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a brk instruction which traps into the debugger
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // size is derived generically from the emitted code
  return MachNode::size(ra_);
}
1547 
1548 //=============================================================================
1549 
#ifndef PRODUCT
  // print pseudo-assembly for the padding nops
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as alignment padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  uint MachNopNode::size(PhaseRegAlloc*) const {
    // each nop is one fixed-width instruction
    return _count * NativeInstruction::instruction_size;
  }
1566 
1567 //=============================================================================
// the constant base produces no value in a register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// no post-register-allocation expansion is needed for the constant base
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// the node emits no code: constants are reached via absolute addressing
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1592 
#ifndef PRODUCT
// print pseudo-assembly for the prolog; mirrors the frame-building
// code emitted by MachPrologNode::emit
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames use an immediate sub; larger ones materialize the
  // offset in rscratch1 first
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1614 
// emit the method prolog: invalidation nop, stack bang, frame build
// and optional simulator/verification hooks
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record the code offset at which the frame is fully set up
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1650 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // the prolog contains no relocatable values
  return 0;
}
1661 
1662 //=============================================================================
1663 
#ifndef PRODUCT
// print pseudo-assembly for the epilog; mirrors the frame-removal and
// poll code emitted by MachEpilogNode::emit
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // frame teardown depends on whether the frame offset fits an
  // immediate; mirrors the size cases in the prolog
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1689 
// emit the method epilog: frame removal, optional simulator notify,
// reserved-stack check and the return-poll load
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // touch the polling page on return so a safepoint can be taken
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1709 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  // use the default pipeline class
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1731 
1732 //=============================================================================
1733 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// map an allocator register number to its spill-copy class
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers, each covering 4 allocator slots
  // (V<n>, V<n>_H, V<n>_J, V<n>_K -- see the vectorx_reg class), so
  // the float slots span [60, 60 + 128)
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1761 
1762 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1763   Compile* C = ra_->C;
1764 
1765   // Get registers to move.
1766   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1767   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1768   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1769   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1770 
1771   enum RC src_hi_rc = rc_class(src_hi);
1772   enum RC src_lo_rc = rc_class(src_lo);
1773   enum RC dst_hi_rc = rc_class(dst_hi);
1774   enum RC dst_lo_rc = rc_class(dst_lo);
1775 
1776   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1777 
1778   if (src_hi != OptoReg::Bad) {
1779     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1780            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1781            "expected aligned-adjacent pairs");
1782   }
1783 
1784   if (src_lo == dst_lo && src_hi == dst_hi) {
1785     return 0;            // Self copy, no move.
1786   }
1787 
1788   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1789               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1790   int src_offset = ra_->reg2offset(src_lo);
1791   int dst_offset = ra_->reg2offset(dst_lo);
1792 
1793   if (bottom_type()->isa_vect() != NULL) {
1794     uint ireg = ideal_reg();
1795     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1796     if (cbuf) {
1797       MacroAssembler _masm(cbuf);
1798       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1799       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1800         // stack->stack
1801         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1802         if (ireg == Op_VecD) {
1803           __ unspill(rscratch1, true, src_offset);
1804           __ spill(rscratch1, true, dst_offset);
1805         } else {
1806           __ spill_copy128(src_offset, dst_offset);
1807         }
1808       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1809         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1810                ireg == Op_VecD ? __ T8B : __ T16B,
1811                as_FloatRegister(Matcher::_regEncode[src_lo]));
1812       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1813         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1814                        ireg == Op_VecD ? __ D : __ Q,
1815                        ra_->reg2offset(dst_lo));
1816       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1817         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1818                        ireg == Op_VecD ? __ D : __ Q,
1819                        ra_->reg2offset(src_lo));
1820       } else {
1821         ShouldNotReachHere();
1822       }
1823     }
1824   } else if (cbuf) {
1825     MacroAssembler _masm(cbuf);
1826     switch (src_lo_rc) {
1827     case rc_int:
1828       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1829         if (is64) {
1830             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1831                    as_Register(Matcher::_regEncode[src_lo]));
1832         } else {
1833             MacroAssembler _masm(cbuf);
1834             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1835                     as_Register(Matcher::_regEncode[src_lo]));
1836         }
1837       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1838         if (is64) {
1839             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1840                      as_Register(Matcher::_regEncode[src_lo]));
1841         } else {
1842             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1843                      as_Register(Matcher::_regEncode[src_lo]));
1844         }
1845       } else {                    // gpr --> stack spill
1846         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1847         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1848       }
1849       break;
1850     case rc_float:
1851       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1852         if (is64) {
1853             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1854                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1855         } else {
1856             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1857                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1858         }
1859       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1860           if (cbuf) {
1861             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1862                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1863         } else {
1864             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1865                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1866         }
1867       } else {                    // fpr --> stack spill
1868         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1869         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1870                  is64 ? __ D : __ S, dst_offset);
1871       }
1872       break;
1873     case rc_stack:
1874       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1875         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1876       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1877         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1878                    is64 ? __ D : __ S, src_offset);
1879       } else {                    // stack --> stack copy
1880         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1881         __ unspill(rscratch1, is64, src_offset);
1882         __ spill(rscratch1, is64, dst_offset);
1883       }
1884       break;
1885     default:
1886       assert(false, "bad rc_class for spill");
1887       ShouldNotReachHere();
1888     }
1889   }
1890 
1891   if (st) {
1892     st->print("spill ");
1893     if (src_lo_rc == rc_stack) {
1894       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1895     } else {
1896       st->print("%s -> ", Matcher::regName[src_lo]);
1897     }
1898     if (dst_lo_rc == rc_stack) {
1899       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1900     } else {
1901       st->print("%s", Matcher::regName[dst_lo]);
1902     }
1903     if (bottom_type()->isa_vect() != NULL) {
1904       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1905     } else {
1906       st->print("\t# spill size = %d", is64 ? 64:32);
1907     }
1908   }
1909 
1910   return 0;
1911 
1912 }
1913 
1914 #ifndef PRODUCT
1915 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1916   if (!ra_)
1917     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1918   else
1919     implementation(NULL, ra_, false, st);
1920 }
1921 #endif
1922 
// Emit the spill/copy instructions into the code buffer (no printing).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1926 
// Instruction size in bytes, computed generically by emitting into a
// scratch buffer.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1930 
1931 //=============================================================================
1932 
1933 #ifndef PRODUCT
1934 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1935   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1936   int reg = ra_->get_reg_first(this);
1937   st->print("add %s, rsp, #%d]\t# box lock",
1938             Matcher::regName[reg], offset);
1939 }
1940 #endif
1941 
1942 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1943   MacroAssembler _masm(&cbuf);
1944 
1945   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1946   int reg    = ra_->get_encode(this);
1947 
1948   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
1949     __ add(as_Register(reg), sp, offset);
1950   } else {
1951     ShouldNotReachHere();
1952   }
1953 }
1954 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() above always produces exactly one 4-byte add instruction.
  return 4;
}
1959 
1960 //=============================================================================
1961 
1962 #ifndef PRODUCT
1963 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1964 {
1965   st->print_cr("# MachUEPNode");
1966   if (UseCompressedClassPointers) {
1967     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1968     if (Universe::narrow_klass_shift() != 0) {
1969       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
1970     }
1971   } else {
1972    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1973   }
1974   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
1975   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
1976 }
1977 #endif
1978 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the receiver's klass (loaded from j_rarg0) against the
  // inline-cache klass in rscratch2; rscratch1 is a temp.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: dispatch to the inline-cache miss stub.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1992 
// Size in bytes, computed generically from a scratch emit.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1997 
1998 // REQUIRED EMIT CODE
1999 
2000 //=============================================================================
2001 
2002 // Emit exception handler code.
// Emits the exception handler as a stub: a far jump to the opto
// exception blob.  Returns the handler's offset within the stub
// section, or 0 if the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  // The reserved stub size must cover everything we just emitted.
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2021 
2022 // Emit deopt handler code.
// Emits the deoptimization handler as a stub.  Returns the handler's
// offset within the stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Point lr at this handler (adr targets the current pc) before
  // jumping to the deopt blob's unpack entry.
  // NOTE(review): confirm against SharedRuntime::deopt_blob() that
  // unpack() expects lr to address the deopt handler itself.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2042 
2043 // REQUIRED MATCHER CODE
2044 
2045 //=============================================================================
2046 
2047 const bool Matcher::match_rule_supported(int opcode) {
2048 
2049   switch (opcode) {
2050   default:
2051     break;
2052   }
2053 
2054   if (!has_match_rule(opcode)) {
2055     return false;
2056   }
2057 
2058   return true;  // Per default match rules are supported.
2059 }
2060 
2061 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2062 
2063   // TODO
2064   // identify extra cases that we might want to provide match rules for
2065   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2066   bool ret_value = match_rule_supported(opcode);
2067   // Add rules here.
2068 
2069   return ret_value;  // Per default match rules are supported.
2070 }
2071 
// NEON has no predicated (masked) vector operations.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2075 
// No platform adjustment to the FP register-pressure threshold.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2079 
// Not used on AArch64 (x87-style FPU stack offsets do not exist here).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2085 
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // +/-32KB matches the reach of the most restrictive short branch form
  // (tbz/tbnz: 14-bit signed word offset), used as a conservative bound
  // for every short-branch rule regardless of 'rule'.
  return (-32768 <= offset && offset < 32768);
}
2095 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2101 
// true just means we have fast l2f conversion
// (scvtf handles long -> float directly on AArch64).
const bool Matcher::convL2FSupported(void) {
  return true;
}
2106 
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  // NEON registers are 128 bits wide; also cap by -XX:MaxVectorSize.
  int size = MIN2(16,(int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  // (returning 0 disables vectorization for this element type)
  if (size < 4) size = 0;
  return size;
}
2116 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = vector width / element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum element count: whatever fits in 8 bytes, but never fewer
// than 2 elements.
const int Matcher::min_vector_size(const BasicType bt) {
//  For the moment limit the vector size to 8 bytes
    int size = 8 / type2aelembytes(bt);
    if (size < 2) size = 2;
    return size;
}
2127 
2128 // Vector ideal reg.
2129 const uint Matcher::vector_ideal_reg(int len) {
2130   switch(len) {
2131     case  8: return Op_VecD;
2132     case 16: return Op_VecX;
2133   }
2134   ShouldNotReachHere();
2135   return 0;
2136 }
2137 
// Vector shift counts are held in a full 128-bit vector register.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
2141 
// AES support not yet implemented
// (no need to pass the original key alongside the expanded key).
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2146 
// x86 supports misaligned vectors store/load.
// NOTE(review): comment inherited from x86; per the code below, AArch64
// likewise permits misaligned vector accesses unless -XX:+AlignVector.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2151 
// false => size gets scaled to BytesPerLong, ok.
// (The array-initialization count operand is in elements, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
2154 
// Use conditional move (CMOVL)
// Extra cost of a long cmove relative to an int cmove: none (csel
// works on 64-bit registers directly).
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
2160 
// Extra cost of a float cmove relative to an int cmove: none (fcsel).
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2165 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;
2168 
// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (AArch64 shift instructions use only the low bits of the count.)
const bool Matcher::need_masked_shift_count = false;
2172 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex (decode-in-address) matching only works when no shift
  // adjustment is needed to decompress an oop.
  return Universe::narrow_oop_shift() == 0;
}
2186 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  // Conservatively disabled: narrow-klass decodes are matched into
  // registers rather than folded into addressing modes.
  return false;
}
2192 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  // (Zero-based compressed oops make the decode a cheap shift/move.)
  return Universe::narrow_oop_base() == NULL;
}
2197 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // (Same zero-base reasoning as const_oop_prefer_decode above.)
  return Universe::narrow_klass_base() == NULL;
}
2202 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
// (AArch64 loads FP constants from the constant pool instead.)
const bool Matcher::rematerialize_float_constants = false;
2209 
// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// (AArch64 handles misaligned 8-byte accesses in hardware.)
const bool Matcher::misaligned_doubles_ok = true;
2215 
// No-op on amd64
// NOTE(review): the comment above is copied from x86; on AArch64 this
// hook is simply unimplemented and must never be reached.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2220 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
// (AArch64 FP arithmetic is IEEE-754 compliant without extra rounding.)
const bool Matcher::strict_fp_requires_explicit_rounding = false;
2224 
// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }
2228 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2234 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments arrive in r0-r7 (integer/oop) and v0-v7 (FP);
  // both allocator halves of each register are listed.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
2260 
// Any argument register may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2265 
// No special assembler sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2269 
// Register for DIVI projection of divmodI.
// Unused: AArch64 has no combined divmod instruction.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2274 
// Register for MODI projection of divmodI.
// Unused: AArch64 has no combined divmod instruction.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2280 
// Register for DIVL projection of divmodL.
// Unused: AArch64 has no combined divmod instruction.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2286 
// Register for MODL projection of divmodL.
// Unused: AArch64 has no combined divmod instruction.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2292 
// The frame pointer is preserved across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2296 
2297 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2298   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2299     Node* u = addp->fast_out(i);
2300     if (u->is_Mem()) {
2301       int opsize = u->as_Mem()->memory_size();
2302       assert(opsize > 0, "unexpected memory operand size");
2303       if (u->as_Mem()->memory_size() != (1<<shift)) {
2304         return false;
2305       }
2306     }
2307   }
2308   return true;
2309 }
2310 
// The matcher does not require ConvI2L nodes to carry a type.
const bool Matcher::convi2l_type_required = false;
2312 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple (base + constant offset) forms are handled generically.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // (base + (ConvI2L(x) << con)): clone the shift (and any inner i2l)
  // into the address so it can match a scaled/sign-extended addressing
  // mode -- but only when every memory user accesses (1 << con) bytes.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (base + ConvI2L(x)): clone just the sign-extension into the
    // address to match an sxtw-extended register offset.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2353 
// No platform-specific address-tree reshaping on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2356 
2357 // helper for encoding java_to_runtime calls on sim
2358 //
2359 // this is needed to compute the extra arguments required when
2360 // planting a call to the simulator blrt instruction. the TypeFunc
2361 // can be queried to identify the counts for integral, and floating
2362 // arguments and the return type
2363 
// Compute the argument counts (gpcnt/fpcnt) and return-type class
// (rtype) needed to encode a simulator blrt call for the given
// signature; see the comment block above.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count incoming arguments by kind.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so FP arguments also increment the
      // general-purpose count below -- verify this fall-through is
      // intentional for the simulator's blrt convention.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type (default, placed mid-switch, covers all
  // integral/pointer returns).
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2398 
// Emit a volatile (ordered) load/store of REG via INSN.  Volatile
// accesses only support a plain [base] addressing mode; the guarantees
// below enforce that no index/scale/displacement sneaks in.
// (Comments stay above the #define: '//' inside a line-continued macro
// would swallow the backslash.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2407 
// Member-function-pointer types for the scalar, FP and vector forms of
// the MacroAssembler load/store emitters consumed by loadStore() below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2412 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Index came from an int (ConvI2L operand): sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 encodes a [base, #disp] mode; otherwise a (scaled)
    // register-offset mode, which cannot also carry a displacement.
    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2443 
  // FP-register variant of loadStore() above; same opcode kludge to
  // detect sign-extended (i2l) index operands.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2466 
  // Vector variant of loadStore(); vector accesses only use plain lsl
  // scaling (no sign-extended index forms).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2478 
2479 %}
2480 
2481 
2482 
2483 //----------ENCODING BLOCK-----------------------------------------------------
2484 // This block specifies the encoding classes used by the compiler to
2485 // output byte streams.  Encoding classes are parameterized macros
2486 // used by Machine Instruction Nodes in order to generate the bit
2487 // encoding of the instruction.  Operands specify their base encoding
2488 // interface with the interface keyword.  There are currently
2489 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2490 // COND_INTER.  REG_INTER causes an operand to generate a function
2491 // which returns its register number when queried.  CONST_INTER causes
2492 // an operand to generate a function which returns the value of the
2493 // constant when queried.  MEMORY_INTER causes an operand to generate
2494 // four functions which return the Base Register, the Index Register,
2495 // the Scale Value, and the Offset Value of the operand when queried.
2496 // COND_INTER causes an operand to generate six functions which return
2497 // the encoding code (ie - encoding bits for the instruction)
2498 // associated with each basic boolean condition for a conditional
2499 // instruction.
2500 //
2501 // Instructions specify two basic values for encoding.  Again, a
2502 // function is available to check if the constant displacement is an
2503 // oop. They use the ins_encode keyword to specify their encoding
2504 // classes (which must be a sequence of enc_class names, and their
2505 // parameters, specified in the encoding block), and they use the
2506 // opcode keyword to specify, in order, their primary, secondary, and
2507 // tertiary opcode.  Only the opcode sections which a particular
2508 // instruction needs for encoding need to be specified.
2509 encode %{
2510   // Build emit functions for each basic byte or larger field in the
2511   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2512   // from C++ code in the enc_class source block.  Emit functions will
2513   // live in the main source block for now.  In future, we can
2514   // generalize this by adding a syntax that specifies the sizes of
2515   // fields in an order, so that the adlc can build the emit functions
2516   // automagically
2517 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    // Deliberately traps at runtime if an instruction without a real
    // encoding is ever emitted.
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2523 
2524   // BEGIN Non-volatile memory access
2525 
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    // Load byte, sign-extend to 32 bits.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2531 
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    // Load byte, sign-extend to 64 bits.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2537 
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    // Load byte, zero-extend (int destination).
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2543 
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    // Load byte, zero-extend (long destination).
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2549 
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    // Load halfword, sign-extend to 32 bits.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2555 
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    // Load halfword, sign-extend to 64 bits.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2561 
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    // Load halfword, zero-extend (int destination).
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2567 
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    // Load halfword, zero-extend (long destination).
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2573 
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    // Load 32-bit word.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2579 
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    // Load 32-bit word, zero-extend into a long register.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2585 
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    // Load 32-bit word, sign-extend to 64 bits.
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2591 
2592   enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
2593     Register dst_reg = as_Register($dst$$reg);
2594     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
2595                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2596   %}
2597 
2598   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
2599     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2600     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
2601                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2602   %}
2603 
2604   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
2605     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2606     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
2607                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2608   %}
2609 
2610   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
2611     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2612     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
2613        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2614   %}
2615 
2616   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
2617     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2618     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
2619        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2620   %}
2621 
2622   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
2623     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2624     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
2625        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2626   %}
2627 
  // Non-volatile store encodings.  The *0 variants store zr (the
  // always-zero register) and therefore take no src operand.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero-byte store preceded by a StoreStore barrier, so the zeroing is
  // ordered before subsequent stores.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for str), so copy it via rscratch2 first
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Float/double stores from FP registers.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores; S/D/Q select the 32/64/128-bit SIMD register view.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2720 
2721   // END Non-volatile memory access
2722 
2723   // volatile loads and stores
2724 
  // Volatile (release/acquire) accesses.  MOV_VOLATILE is a macro defined
  // earlier in this file (body not visible here); it appears to resolve the
  // memory operand and emit the named release-store (stlr*) or acquire-load
  // (ldar*) instruction, and to declare _masm, which the trailing
  // sign-extends below rely on -- NOTE(review): confirm against the macro
  // definition.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Acquire-load byte then sign-extend to 32 bits (no ldarsb instruction
  // exists, so the extend is a separate step).
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Acquire-load byte then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Acquire-load halfword then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Acquire-load halfword then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Volatile float load: acquire-load the 32-bit pattern into rscratch1,
  // then move it to the FP register (there is no FP ldar).
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: acquire-load 64 bits via rscratch1, then fmov.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2815 
  // Volatile 64-bit store (release semantics via stlr).
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not a valid source for stlr), so copy it via rscratch2 first
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile float store: move the 32-bit pattern to rscratch2, then
  // release-store it (there is no FP stlr).  The inner scope keeps this
  // _masm from clashing with the one MOV_VOLATILE introduces.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: as above but 64-bit.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2849 
2850   // synchronized read/update encodings
2851 
  // Load-acquire exclusive.  ldaxr only takes a bare base register, so any
  // index/displacement must first be folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {  // no index register
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // fold disp first, then the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
2880 
  // Store-release exclusive.  Address is formed in rscratch2 (stlxr takes a
  // bare base register); the store status lands in rscratch1 (0 = success),
  // and the final cmpw leaves EQ set on success for the matched rule.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {  // no index register
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // fold disp first, then the scaled index
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
2910 
  // Plain compare-and-swap encodings (release-only; no acquire on the load
  // side).  All sizes delegate to MacroAssembler::cmpxchg; the memory
  // operand must be a bare base register (no index, no displacement).

  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2942 
2943 
  // Shenandoah GC oop CAS: oldval is copied to tmp so the barrier code can
  // work with a scratch copy without clobbering the caller's oldval.  Only
  // reachable when Shenandoah is compiled in.
  enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
#else
    ShouldNotReachHere();
#endif
  %}
2956 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2992 
  // Acquiring variant of the Shenandoah oop CAS (see
  // aarch64_enc_cmpxchg_acq for why acquire is used here).
  enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
#else
    ShouldNotReachHere();
#endif
  %}
3005 
  // auxiliary used for CompareAndSwapX to set result register
  // (res := 1 if the preceding compare left EQ set, else 0)
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3012 
  // prefetch encodings

  // Prefetch for store (PSTL1KEEP hint).  prfm cannot combine a register
  // index with a displacement, so that case folds disp into rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3033 
  // mov encodings
3035 
  // Load a 32-bit immediate; zero is materialized from zr rather than a
  // mov-immediate sequence.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Load a 64-bit immediate; zero is materialized from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3057 
  // Load a pointer constant, choosing the relocation-appropriate sequence:
  // oop -> movoop, metadata -> mov_metadata, otherwise a plain address
  // (small addresses inline, large ones via adrp+add).  NULL and the
  // sentinel value 1 have dedicated encodings (mov_p0/mov_p1 below) and
  // must never reach this one.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // address fits below the first page; a plain mov suffices
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3082 
  // NULL pointer constant.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a sentinel value).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // Address of the polling page, loaded with a poll_type relocation; the
  // page is expected to be 4K-aligned so adrp's residual offset must be 0.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Base of the GC card table byte map.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
3108 
  // Narrow (compressed) oop constant; NULL has its own encoding below.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow NULL.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3140 
  // arithmetic encodings

  // Shared add/subtract-immediate encoding: the rule's $primary field
  // selects subtract by negating the constant, and a negative result is
  // re-expressed with the opposite opcode so the assembler always sees a
  // non-negative immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3170 
  // Integer divide/modulo via corrected_idivl/q, which handle the Java
  // corner cases (e.g. MIN_VALUE / -1); the boolean argument selects
  // remainder (true) vs quotient (false).

  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3202 
  // compare instruction encodings

  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Compare against a small immediate using subs/adds with zr as the
  // discarded destination; a negative immediate is handled by adding its
  // negation instead.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // General 32-bit immediate compare: materialize the constant in
  // rscratch1 and compare register-to-register.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit immediate; val != -val filters out
  // Long.MIN_VALUE (the one value equal to its own negation), which cannot
  // be safely negated and so goes through rscratch1 instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // General 64-bit immediate compare via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
3260 
  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Null test of a pointer (compare against zr).
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Null test of a narrow oop.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}

  // Unconditional branch.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch, condition code taken from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; identical emission, condition codes differ
  // in the operand definition.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3304 
  // Slow-path subtype check.  On the hit path, $primary selects whether
  // result is cleared to zero; on a miss, control falls through to the
  // bound miss label with result unchanged (condition codes are set by
  // check_klass_subtype_slow_path).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3322 
  // Static Java call.  Runtime-wrapper targets (no _method) get a plain
  // runtime-call trampoline; real methods get an (opt-)static call
  // relocation plus a to-interpreter stub.  Either a failed trampoline or a
  // failed stub aborts the compile with "CodeCache is full".
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3349 
  // Virtual/interface Java call through an inline cache.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Post-call check; only active under -XX:+VerifyStackAtCalls (currently
  // unimplemented on this port).
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
3367 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        // No space for the trampoline; abandon the compile.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache (native runtime): use blrt, which
      // needs the argument counts/return type derived from the call's
      // TypeFunc to marshal registers correctly.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      // (pre-decrements sp by 2 words; matching add below restores it).
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3398 
  enc_class aarch64_enc_rethrow() %{
    // Jump (not call) to the shared rethrow stub; control does not return here.
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3403 
  enc_class aarch64_enc_ret() %{
    // Method return: branch to the return address held in the link register.
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3408 
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    // Tail call: indirect branch to the computed target; no return address
    // is pushed, so the callee returns to our caller.
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3414 
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    // Tail jump used for exception forwarding: hand off to the target with
    // the original return address passed in r3.
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3424 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor enter. On exit the condition flags carry the result:
    // EQ = lock acquired (thin, recursive, biased, or monitor),
    // NE = must take the slow path (see trailing comments).
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      // May branch directly to cont with flags set if biasing succeeds/fails.
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    // we can use AArch64's bit test and branch here but
    // markoopDesc does not define a bit index just the bit value
    // so assert in case the bit pos changes
    // NOTE(review): identifiers with a leading double underscore are reserved
    // to the implementation in C++ -- consider renaming this macro.
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single compare-and-swap-with-acquire/release.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC path: load-acquire exclusive / store-release exclusive loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      __ cmp(rscratch1, disp_hdr);
    } else {
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3570 
3571   // TODO
3572   // reimplement this with custom cmpxchgptr code
3573   // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit, the inverse of aarch64_enc_fast_lock.
    // On exit: flags EQ = unlocked on the fast path, NE = take the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a lightweight lock; this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE path: CAS the mark back from box to the displaced header.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // LL/SC path: exclusive load/store-release retry loop.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    // Only release the monitor if EntryList and cxq are both empty;
    // otherwise the slow path must wake a successor.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr);
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3659 
3660 %}
3661 
3662 //----------FRAME--------------------------------------------------------------
3663 // Definition of frame structure and management information.
3664 //
3665 //  S T A C K   L A Y O U T    Allocators stack-slot number
3666 //                             |   (to get allocators register number
3667 //  G  Owned by    |        |  v    add OptoReg::stack0())
3668 //  r   CALLER     |        |
3669 //  o     |        +--------+      pad to even-align allocators stack-slot
3670 //  w     V        |  pad0  |        numbers; owned by CALLER
3671 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3672 //  h     ^        |   in   |  5
3673 //        |        |  args  |  4   Holes in incoming args owned by SELF
3674 //  |     |        |        |  3
3675 //  |     |        +--------+
3676 //  V     |        | old out|      Empty on Intel, window on Sparc
3677 //        |    old |preserve|      Must be even aligned.
3678 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3679 //        |        |   in   |  3   area for Intel ret address
3680 //     Owned by    |preserve|      Empty on Sparc.
3681 //       SELF      +--------+
3682 //        |        |  pad2  |  2   pad to align old SP
3683 //        |        +--------+  1
3684 //        |        | locks  |  0
3685 //        |        +--------+----> OptoReg::stack0(), even aligned
3686 //        |        |  pad1  | 11   pad to align new SP
3687 //        |        +--------+
3688 //        |        |        | 10
3689 //        |        | spills |  9   spills
3690 //        V        |        |  8   (pad0 slot for callee)
3691 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3692 //        ^        |  out   |  7
3693 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3694 //     Owned by    +--------+
3695 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3696 //        |    new |preserve|      Must be even-aligned.
3697 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3698 //        |        |        |
3699 //
3700 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3701 //         known from SELF's arguments and the Java calling convention.
3702 //         Region 6-7 is determined per call site.
3703 // Note 2: If the calling convention leaves holes in the incoming argument
3704 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3706 //         incoming area, as the Java calling convention is completely under
3707 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3709 //         varargs C calling conventions.
3710 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3711 //         even aligned with pad0 as needed.
3712 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3713 //           (the latter is true on Intel but is it false on AArch64?)
3714 //         region 6-11 is even aligned; it may be padded out more so that
3715 //         the region from SP to FP meets the minimum stack alignment.
3716 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3717 //         alignment.  Region 11, pad1, may be dynamically extended so that
3718 //         SP meets the minimum alignment.
3719 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half register for each ideal register type, indexed by Op_ constant.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-half register (OptoReg::Bad where the value fits in one slot).
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3823 
3824 //----------ATTRIBUTES---------------------------------------------------------
3825 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3841 
3842 //----------OPERANDS-----------------------------------------------------------
3843 // Operand definitions must precede instruction definitions for correct parsing
3844 // in the ADLC because operands constitute user defined types which are used in
3845 // instruction definitions.
3846 
3847 //----------Simple Operands----------------------------------------------------
3848 
3849 // Integer operands 32 bit
3850 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3913 
// 32 bit integer equal to 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 65535 (low halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4023 
// 64 bit integer equal to 255 (low byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer equal to 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer equal to 4294967295 (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of the form 2^k - 1 (contiguous low-order ones)
// with the top two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of the form 2^k - 1 (contiguous low-order ones)
// with the top two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4075 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4129 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset valid for a 4-byte access (shift = 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset valid for an 8-byte access (shift = 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int offset valid for a 16-byte access (shift = 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset valid for a 4-byte access (shift = 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset valid for an 8-byte access (shift = 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// long offset valid for a 16-byte access (shift = 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4210 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4232 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4319 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4401 
// Float and Double operands
// Double Immediate
// Matches any double constant.
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// jlong_cast compares the raw bit pattern, so -0.0d (sign bit set) does
// NOT match — only positive zero does.
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as a packed fmov immediate
// (per Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
// Matches any float constant.
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// jint_cast compares the raw bit pattern, so -0.0f does NOT match.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as a packed fmov immediate (the float value
// is widened to double for the validity check).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4462 
// Narrow pointer operands
// Narrow Pointer Immediate
// Matches any compressed-oop constant.
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass pointer immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4493 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its siblings this operand declares no op_cost,
// so it relies on the ADLC default — confirm this is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
4537 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The fixed-register operands below pin an allocation to a single
// physical register, typically to satisfy a calling or runtime-stub
// convention.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4654 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only (the frame pointer register)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4709 
// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4754 
4755 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer (32 bit) Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0
operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4815 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q-sized) vector register
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed FP registers V0-V3, pinned for stub/runtime conventions.

// Double Register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4895 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4935 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method oop register (same physical register class
// as the inline-cache register).
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4977 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER descriptions below index(0xffffffff) is the
// sentinel used throughout this file for "no index register".

// [reg] -- plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (i32 sign-extended to i64) << scale]
// predicate restricts the scale so every memory use of the address fits
// the instruction's addressing mode (see size_fits_all_mem_uses).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + lreg << scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (i32 sign-extended to i64)]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + lreg]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5051 
// [reg + offset] operands. The numeric suffix (4/8/16) selects an
// immIOffsetN/immLoffsetN constraint — presumably offsets valid for
// accesses of that byte size (the imm operands are defined elsewhere
// in this file; confirm against their predicates).

// [reg + int offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + int offset] valid for 4-byte accesses
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + int offset] valid for 8-byte accesses
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + int offset] valid for 16-byte accesses
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + long offset]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + long offset] valid for 4-byte accesses
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + long offset] valid for 8-byte accesses
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + long offset] valid for 16-byte accesses
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5163 
// Narrow-oop variants of the memory operands above. All are guarded by
// Universe::narrow_oop_shift() == 0, i.e. they apply only when a
// compressed oop decodes without shifting, so the narrow register can
// serve directly as the base address.

// [reg] -- narrow oop base, register-indirect
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (i32 sign-extended to i64) << scale] -- narrow oop base
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + lreg << scale] -- narrow oop base
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (i32 sign-extended to i64)] -- narrow oop base
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + lreg] -- narrow oop base
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + int offset] -- narrow oop base
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + long offset] -- narrow oop base
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5268 
5269 
5270 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread-reg + last_Java_pc offset]: the offset is pinned by the
// immL_pc_off operand defined earlier in this file.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5285 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): base(0x1e) denotes the stack pointer; the inline "RSP"
// comments are inherited from the x86 ad file.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a 32-bit int
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a 64-bit long
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5360 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings matching the
// mnemonic in each entry (eq=0x0, ne=0x1, lt=0xb, ge=0xa, ...).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// Same as cmpOp but mapping less/greater to the unsigned condition
// codes lo/hs/ls/hi.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5416 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to eq/ne tests only)

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to lt/ge tests only)

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// (restricted by predicate to eq/ne/lt/ge tests)

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5492 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Memory operand classes for vector loads/stores; the suffix is the
// access size in bytes and selects the matching size-restricted offset
// operands.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5509 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5537 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the named A53-style stages onto the generic S0..S5 stages used by
// the pipe_desc in this file.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5547 
5548 // Integer ALU reg operation
5549 pipeline %{
5550 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5563 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 means "either issue slot"; ALU means "either ALU". Classes that
// book INS0 (only) can dual-issue solely as instruction 0 of a bundle.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5584 
5585 //----------PIPELINE CLASSES---------------------------------------------------
5586 // Pipeline Classes describe the stages in which input and output are
5587 // referenced by the hardware pipeline.
5588 
// FP two-operand arithmetic, single precision (e.g. FADD s0, s1, s2).
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand arithmetic, double precision (e.g. FADD d0, d1, d2).
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision (e.g. FNEG s0, s1).
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision (e.g. FNEG d0, d1).
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double to float (FCVT s0, d1).
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float to double (FCVT d0, s1).
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float to int (FCVTZS w0, s1).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float to long (FCVTZS x0, s1).
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int to float (SCVTF s0, w1).
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long to float (SCVTF s0, x1).
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double to int (FCVTZS w0, d1).
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double to long (FCVTZS x0, d1).
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int to double (SCVTF d0, w1).
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5707 
// FP convert long to double (SCVTF d0, x1).
// The source is a 64-bit integer register, so declare it iRegL to match
// the companion class fp_l2f. (Was iRegIorL2I, a copy-paste slip; pipe
// class operand types are descriptive only, so this is a cosmetic fix.)
pipe_class fp_l2d(vRegD dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5716 
// FP divide, single precision; issue slot 0 only (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; issue slot 0 only (INS0).
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision (FCSEL s0, s1, s2, <cond>);
// also reads the flags register.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision (FCSEL d0, d1, d2, <cond>).
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision (FMOV s0, #imm).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision (FMOV d0, #imm).
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load from the constant pool, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load from the constant pool, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
5790 
// NEON pipeline classes. The 64-bit (vecD) variants can issue in either
// slot (INS01); most 128-bit (vecX) variants occupy the whole issue
// bundle and are restricted to slot 0 (INS0).

// NEON integer multiply, 64-bit vector.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// NEON integer multiply, 128-bit vector.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// NEON multiply-accumulate, 64-bit vector.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);   // accumulate: dst is also an input
  INS01  : ISS;
  NEON_FP : S5;
%}

// NEON multiply-accumulate, 128-bit vector.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);   // accumulate: dst is also an input
  INS0   : ISS;
  NEON_FP : S5;
%}

// NEON integer two-operand op (add/sub etc.), 64-bit vector.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// NEON integer two-operand op, 128-bit vector.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// NEON logical op (and/orr/eor), 64-bit vector.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// NEON logical op, 128-bit vector.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// NEON shift by register-held amount, 64-bit vector.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// NEON shift by register-held amount, 128-bit vector.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// NEON shift by immediate, 64-bit vector.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// NEON shift by immediate, 128-bit vector.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// NEON FP two-operand op (fadd/fsub etc.), 64-bit vector.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// NEON FP two-operand op, 128-bit vector.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// NEON FP multiply/divide, 64-bit vector (slot 0 only).
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// NEON FP multiply/divide, 128-bit vector.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// NEON FP square root, 128-bit vector.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// NEON FP unary op, 64-bit vector.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// NEON FP unary op, 128-bit vector.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate a general-purpose register into all lanes, 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general-purpose register into all lanes, 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into both lanes, 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (MOVI), 64-bit vector.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (MOVI), 128-bit vector.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64 bits.
// NOTE(review): dst is written in S5 while NEON_FP is booked at S3 --
// confirm whether the resource stage was intended to match S5.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128 bits (same S5/S3 caveat as vload_reg_mem64).
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6056 
// Vector store, 64 bits.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6065 
// Vector store, 128 bits. The source is a 128-bit vector, so declare it
// vecX for consistency with vload_reg_mem128. (Was vecD, copy-pasted from
// the 64-bit variant; pipe class operand types are descriptive only.)
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6074 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is booked at
// EX1 -- confirm whether ALU : EX2 was intended here.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6172 
//------- Compare operation -------------------------------

// Compare reg-reg; writes the flags register.
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate; writes the flags register.
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6199 
//------- Conditional instructions ------------------------

// Conditional, no register operands (flags only)
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional, 2 register operands
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional, 1 register operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6237 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6290 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6316 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6350 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// (dst here is the index register of the address; it is only read.)
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6384 
//------- Branch pipeline operations ----------------------
// (header previously said "Store": copy-paste; this section is branches)

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch; reads the flags register.
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6413 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6437 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
6477 
6478 %}
6479 //----------INSTRUCTIONS-------------------------------------------------------
6480 //
6481 // match      -- States which machine-independent subtree may be replaced
6482 //               by this instruction.
6483 // ins_cost   -- The estimated cost of this instruction is used by instruction
6484 //               selection to identify a minimum cost tree of machine
6485 //               instructions that matches a tree of machine-independent
6486 //               instructions.
6487 // format     -- A string providing the disassembly for this instruction.
6488 //               The value of an instruction's operand may be inserted
6489 //               by referring to it with a '$' prefix.
6490 // opcode     -- Three instruction opcodes may be provided.  These are referred
6491 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6493 //               indicate the type of machine instruction, while secondary
6494 //               and tertiary are often used for prefix options or addressing
6495 //               modes.
6496 // ins_encode -- A list of encode classes with parameters. The encode class
6497 //               name must have been defined in an 'enc_class' specification
6498 //               in the encode section of the architecture description.
6499 
6500 // ============================================================================
6501 // Memory (Load/Store) Instructions
6502 
6503 // Load Instructions
6504 
6505 // Load Byte (8 bit signed)
6506 instruct loadB(iRegINoSp dst, memory mem)
6507 %{
6508   match(Set dst (LoadB mem));
6509   predicate(!needs_acquiring_load(n));
6510 
6511   ins_cost(4 * INSN_COST);
6512   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6513 
6514   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6515 
6516   ins_pipe(iload_reg_mem);
6517 %}
6518 
6519 // Load Byte (8 bit signed) into long
6520 instruct loadB2L(iRegLNoSp dst, memory mem)
6521 %{
6522   match(Set dst (ConvI2L (LoadB mem)));
6523   predicate(!needs_acquiring_load(n->in(1)));
6524 
6525   ins_cost(4 * INSN_COST);
6526   format %{ "ldrsb  $dst, $mem\t# byte" %}
6527 
6528   ins_encode(aarch64_enc_ldrsb(dst, mem));
6529 
6530   ins_pipe(iload_reg_mem);
6531 %}
6532 
6533 // Load Byte (8 bit unsigned)
6534 instruct loadUB(iRegINoSp dst, memory mem)
6535 %{
6536   match(Set dst (LoadUB mem));
6537   predicate(!needs_acquiring_load(n));
6538 
6539   ins_cost(4 * INSN_COST);
6540   format %{ "ldrbw  $dst, $mem\t# byte" %}
6541 
6542   ins_encode(aarch64_enc_ldrb(dst, mem));
6543 
6544   ins_pipe(iload_reg_mem);
6545 %}
6546 
6547 // Load Byte (8 bit unsigned) into long
6548 instruct loadUB2L(iRegLNoSp dst, memory mem)
6549 %{
6550   match(Set dst (ConvI2L (LoadUB mem)));
6551   predicate(!needs_acquiring_load(n->in(1)));
6552 
6553   ins_cost(4 * INSN_COST);
6554   format %{ "ldrb  $dst, $mem\t# byte" %}
6555 
6556   ins_encode(aarch64_enc_ldrb(dst, mem));
6557 
6558   ins_pipe(iload_reg_mem);
6559 %}
6560 
6561 // Load Short (16 bit signed)
6562 instruct loadS(iRegINoSp dst, memory mem)
6563 %{
6564   match(Set dst (LoadS mem));
6565   predicate(!needs_acquiring_load(n));
6566 
6567   ins_cost(4 * INSN_COST);
6568   format %{ "ldrshw  $dst, $mem\t# short" %}
6569 
6570   ins_encode(aarch64_enc_ldrshw(dst, mem));
6571 
6572   ins_pipe(iload_reg_mem);
6573 %}
6574 
6575 // Load Short (16 bit signed) into long
6576 instruct loadS2L(iRegLNoSp dst, memory mem)
6577 %{
6578   match(Set dst (ConvI2L (LoadS mem)));
6579   predicate(!needs_acquiring_load(n->in(1)));
6580 
6581   ins_cost(4 * INSN_COST);
6582   format %{ "ldrsh  $dst, $mem\t# short" %}
6583 
6584   ins_encode(aarch64_enc_ldrsh(dst, mem));
6585 
6586   ins_pipe(iload_reg_mem);
6587 %}
6588 
6589 // Load Char (16 bit unsigned)
6590 instruct loadUS(iRegINoSp dst, memory mem)
6591 %{
6592   match(Set dst (LoadUS mem));
6593   predicate(!needs_acquiring_load(n));
6594 
6595   ins_cost(4 * INSN_COST);
6596   format %{ "ldrh  $dst, $mem\t# short" %}
6597 
6598   ins_encode(aarch64_enc_ldrh(dst, mem));
6599 
6600   ins_pipe(iload_reg_mem);
6601 %}
6602 
6603 // Load Short/Char (16 bit unsigned) into long
6604 instruct loadUS2L(iRegLNoSp dst, memory mem)
6605 %{
6606   match(Set dst (ConvI2L (LoadUS mem)));
6607   predicate(!needs_acquiring_load(n->in(1)));
6608 
6609   ins_cost(4 * INSN_COST);
6610   format %{ "ldrh  $dst, $mem\t# short" %}
6611 
6612   ins_encode(aarch64_enc_ldrh(dst, mem));
6613 
6614   ins_pipe(iload_reg_mem);
6615 %}
6616 
6617 // Load Integer (32 bit signed)
6618 instruct loadI(iRegINoSp dst, memory mem)
6619 %{
6620   match(Set dst (LoadI mem));
6621   predicate(!needs_acquiring_load(n));
6622 
6623   ins_cost(4 * INSN_COST);
6624   format %{ "ldrw  $dst, $mem\t# int" %}
6625 
6626   ins_encode(aarch64_enc_ldrw(dst, mem));
6627 
6628   ins_pipe(iload_reg_mem);
6629 %}
6630 
6631 // Load Integer (32 bit signed) into long
6632 instruct loadI2L(iRegLNoSp dst, memory mem)
6633 %{
6634   match(Set dst (ConvI2L (LoadI mem)));
6635   predicate(!needs_acquiring_load(n->in(1)));
6636 
6637   ins_cost(4 * INSN_COST);
6638   format %{ "ldrsw  $dst, $mem\t# int" %}
6639 
6640   ins_encode(aarch64_enc_ldrsw(dst, mem));
6641 
6642   ins_pipe(iload_reg_mem);
6643 %}
6644 
6645 // Load Integer (32 bit unsigned) into long
6646 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6647 %{
6648   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6649   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6650 
6651   ins_cost(4 * INSN_COST);
6652   format %{ "ldrw  $dst, $mem\t# int" %}
6653 
6654   ins_encode(aarch64_enc_ldrw(dst, mem));
6655 
6656   ins_pipe(iload_reg_mem);
6657 %}
6658 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly comment corrected from "# int" (copy-paste): this is a
  // 64-bit long load. Format text only affects -XX:+PrintOptoAssembly.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6672 
// Load Range (array length; no acquire variant needed)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6769 
6770 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
6796 
// Load Pointer Constant
// Costed at 4 instructions: materializing an arbitrary pointer may take a
// movz/movk sequence.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
6812 
6813 // Load Null Pointer Constant
6814 
6815 instruct loadConP0(iRegPNoSp dst, immP0 con)
6816 %{
6817   match(Set dst con);
6818 
6819   ins_cost(INSN_COST);
6820   format %{ "mov  $dst, $con\t# NULL ptr" %}
6821 
6822   ins_encode(aarch64_enc_mov_p0(dst, con));
6823 
6824   ins_pipe(ialu_imm);
6825 %}
6826 
// Load Pointer Constant One
//
// Materializes the constant pointer value 1 (used e.g. as a marker
// value, matched via the immP_1 operand).

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed annotation: this was "# NULL ptr", copy-pasted from loadConP0;
  // the constant loaded here is 1, not null.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6840 
6841 // Load Poll Page Constant
6842 
6843 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6844 %{
6845   match(Set dst con);
6846 
6847   ins_cost(INSN_COST);
6848   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6849 
6850   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6851 
6852   ins_pipe(ialu_imm);
6853 %}
6854 
6855 // Load Byte Map Base Constant
6856 
6857 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6858 %{
6859   match(Set dst con);
6860 
6861   ins_cost(INSN_COST);
6862   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6863 
6864   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6865 
6866   ins_pipe(ialu_imm);
6867 %}
6868 
6869 // Load Narrow Pointer Constant
6870 
6871 instruct loadConN(iRegNNoSp dst, immN con)
6872 %{
6873   match(Set dst con);
6874 
6875   ins_cost(INSN_COST * 4);
6876   format %{ "mov  $dst, $con\t# compressed ptr" %}
6877 
6878   ins_encode(aarch64_enc_mov_n(dst, con));
6879 
6880   ins_pipe(ialu_imm);
6881 %}
6882 
6883 // Load Narrow Null Pointer Constant
6884 
6885 instruct loadConN0(iRegNNoSp dst, immN0 con)
6886 %{
6887   match(Set dst con);
6888 
6889   ins_cost(INSN_COST);
6890   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6891 
6892   ins_encode(aarch64_enc_mov_n0(dst, con));
6893 
6894   ins_pipe(ialu_imm);
6895 %}
6896 
6897 // Load Narrow Klass Constant
6898 
6899 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6900 %{
6901   match(Set dst con);
6902 
6903   ins_cost(INSN_COST);
6904   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6905 
6906   ins_encode(aarch64_enc_mov_nk(dst, con));
6907 
6908   ins_pipe(ialu_imm);
6909 %}
6910 
6911 // Load Packed Float Constant
6912 
6913 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6914   match(Set dst con);
6915   ins_cost(INSN_COST * 4);
6916   format %{ "fmovs  $dst, $con"%}
6917   ins_encode %{
6918     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6919   %}
6920 
6921   ins_pipe(fp_imm_s);
6922 %}
6923 
6924 // Load Float Constant
6925 
6926 instruct loadConF(vRegF dst, immF con) %{
6927   match(Set dst con);
6928 
6929   ins_cost(INSN_COST * 4);
6930 
6931   format %{
6932     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6933   %}
6934 
6935   ins_encode %{
6936     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6937   %}
6938 
6939   ins_pipe(fp_load_constant_s);
6940 %}
6941 
6942 // Load Packed Double Constant
6943 
6944 instruct loadConD_packed(vRegD dst, immDPacked con) %{
6945   match(Set dst con);
6946   ins_cost(INSN_COST);
6947   format %{ "fmovd  $dst, $con"%}
6948   ins_encode %{
6949     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
6950   %}
6951 
6952   ins_pipe(fp_imm_d);
6953 %}
6954 
// Load Double Constant
//
// Loads a double that cannot be encoded as an fmov immediate
// (see loadConD_packed) from the constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // Fixed annotation: said "float=$con"; this is a double constant.
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6971 
6972 // Store Instructions
6973 
// Store CMS card-mark Immediate
//
// The StoreStore barrier that would normally precede the card-table
// write is elided: unnecessary_storestore(n) has established that a
// preceding barrier already provides the required ordering.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  // Stores the zero register to the card-table entry.
  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6988 
// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
//
// Fallback for StoreCM when the storestore cannot be elided (no
// predicate): emits an explicit dmb ishst before the card write,
// hence the doubled cost relative to storeimmCM0.
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
7004 
7005 // Store Byte
7006 instruct storeB(iRegIorL2I src, memory mem)
7007 %{
7008   match(Set mem (StoreB mem src));
7009   predicate(!needs_releasing_store(n));
7010 
7011   ins_cost(INSN_COST);
7012   format %{ "strb  $src, $mem\t# byte" %}
7013 
7014   ins_encode(aarch64_enc_strb(src, mem));
7015 
7016   ins_pipe(istore_reg_mem);
7017 %}
7018 
7019 
// Store Byte Zero
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed annotation: was "strb rscractch2" (misspelled and wrong —
  // aarch64_enc_strb0 stores the zero register, cf. storeimmC0/storeimmI0).
  format %{ "strb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7032 
7033 // Store Char/Short
7034 instruct storeC(iRegIorL2I src, memory mem)
7035 %{
7036   match(Set mem (StoreC mem src));
7037   predicate(!needs_releasing_store(n));
7038 
7039   ins_cost(INSN_COST);
7040   format %{ "strh  $src, $mem\t# short" %}
7041 
7042   ins_encode(aarch64_enc_strh(src, mem));
7043 
7044   ins_pipe(istore_reg_mem);
7045 %}
7046 
7047 instruct storeimmC0(immI0 zero, memory mem)
7048 %{
7049   match(Set mem (StoreC mem zero));
7050   predicate(!needs_releasing_store(n));
7051 
7052   ins_cost(INSN_COST);
7053   format %{ "strh  zr, $mem\t# short" %}
7054 
7055   ins_encode(aarch64_enc_strh0(mem));
7056 
7057   ins_pipe(istore_mem);
7058 %}
7059 
7060 // Store Integer
7061 
7062 instruct storeI(iRegIorL2I src, memory mem)
7063 %{
7064   match(Set mem(StoreI mem src));
7065   predicate(!needs_releasing_store(n));
7066 
7067   ins_cost(INSN_COST);
7068   format %{ "strw  $src, $mem\t# int" %}
7069 
7070   ins_encode(aarch64_enc_strw(src, mem));
7071 
7072   ins_pipe(istore_reg_mem);
7073 %}
7074 
7075 instruct storeimmI0(immI0 zero, memory mem)
7076 %{
7077   match(Set mem(StoreI mem zero));
7078   predicate(!needs_releasing_store(n));
7079 
7080   ins_cost(INSN_COST);
7081   format %{ "strw  zr, $mem\t# int" %}
7082 
7083   ins_encode(aarch64_enc_strw0(mem));
7084 
7085   ins_pipe(istore_mem);
7086 %}
7087 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed annotation: was "# int"; this is a 64-bit long store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7101 
// Store Long Zero (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed annotation: was "# int"; this is a 64-bit long store of zero.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7115 
7116 // Store Pointer
7117 instruct storeP(iRegP src, memory mem)
7118 %{
7119   match(Set mem (StoreP mem src));
7120   predicate(!needs_releasing_store(n));
7121 
7122   ins_cost(INSN_COST);
7123   format %{ "str  $src, $mem\t# ptr" %}
7124 
7125   ins_encode(aarch64_enc_str(src, mem));
7126 
7127   ins_pipe(istore_reg_mem);
7128 %}
7129 
7130 // Store Pointer
7131 instruct storeimmP0(immP0 zero, memory mem)
7132 %{
7133   match(Set mem (StoreP mem zero));
7134   predicate(!needs_releasing_store(n));
7135 
7136   ins_cost(INSN_COST);
7137   format %{ "str zr, $mem\t# ptr" %}
7138 
7139   ins_encode(aarch64_enc_str0(mem));
7140 
7141   ins_pipe(istore_mem);
7142 %}
7143 
7144 // Store Compressed Pointer
7145 instruct storeN(iRegN src, memory mem)
7146 %{
7147   match(Set mem (StoreN mem src));
7148   predicate(!needs_releasing_store(n));
7149 
7150   ins_cost(INSN_COST);
7151   format %{ "strw  $src, $mem\t# compressed ptr" %}
7152 
7153   ins_encode(aarch64_enc_strw(src, mem));
7154 
7155   ins_pipe(istore_reg_mem);
7156 %}
7157 
// Store Compressed Null Pointer
//
// When both the narrow-oop base and narrow-klass base are NULL, the
// heapbase register (rheapbase) is known to hold zero, so it can be
// stored directly instead of materializing a zero — hence the
// predicate on both bases and the iRegIHeapbase operand.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  // Reuses the plain strw encoding with rheapbase as the data source.
  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
7172 
7173 // Store Float
7174 instruct storeF(vRegF src, memory mem)
7175 %{
7176   match(Set mem (StoreF mem src));
7177   predicate(!needs_releasing_store(n));
7178 
7179   ins_cost(INSN_COST);
7180   format %{ "strs  $src, $mem\t# float" %}
7181 
7182   ins_encode( aarch64_enc_strs(src, mem) );
7183 
7184   ins_pipe(pipe_class_memory);
7185 %}
7186 
7187 // TODO
7188 // implement storeImmF0 and storeFImmPacked
7189 
7190 // Store Double
7191 instruct storeD(vRegD src, memory mem)
7192 %{
7193   match(Set mem (StoreD mem src));
7194   predicate(!needs_releasing_store(n));
7195 
7196   ins_cost(INSN_COST);
7197   format %{ "strd  $src, $mem\t# double" %}
7198 
7199   ins_encode( aarch64_enc_strd(src, mem) );
7200 
7201   ins_pipe(pipe_class_memory);
7202 %}
7203 
7204 // Store Compressed Klass Pointer
7205 instruct storeNKlass(iRegN src, memory mem)
7206 %{
7207   predicate(!needs_releasing_store(n));
7208   match(Set mem (StoreNKlass mem src));
7209 
7210   ins_cost(INSN_COST);
7211   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7212 
7213   ins_encode(aarch64_enc_strw(src, mem));
7214 
7215   ins_pipe(istore_reg_mem);
7216 %}
7217 
7218 // TODO
7219 // implement storeImmD0 and storeDImmPacked
7220 
7221 // prefetch instructions
7222 // Must be safe to execute with invalid address (cannot fault).
7223 
7224 instruct prefetchalloc( memory mem ) %{
7225   match(PrefetchAllocation mem);
7226 
7227   ins_cost(INSN_COST);
7228   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7229 
7230   ins_encode( aarch64_enc_prefetchw(mem) );
7231 
7232   ins_pipe(iload_prefetch);
7233 %}
7234 
7235 //  ---------------- volatile loads and stores ----------------
7236 
7237 // Load Byte (8 bit signed)
7238 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7239 %{
7240   match(Set dst (LoadB mem));
7241 
7242   ins_cost(VOLATILE_REF_COST);
7243   format %{ "ldarsb  $dst, $mem\t# byte" %}
7244 
7245   ins_encode(aarch64_enc_ldarsb(dst, mem));
7246 
7247   ins_pipe(pipe_serial);
7248 %}
7249 
7250 // Load Byte (8 bit signed) into long
7251 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7252 %{
7253   match(Set dst (ConvI2L (LoadB mem)));
7254 
7255   ins_cost(VOLATILE_REF_COST);
7256   format %{ "ldarsb  $dst, $mem\t# byte" %}
7257 
7258   ins_encode(aarch64_enc_ldarsb(dst, mem));
7259 
7260   ins_pipe(pipe_serial);
7261 %}
7262 
7263 // Load Byte (8 bit unsigned)
7264 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7265 %{
7266   match(Set dst (LoadUB mem));
7267 
7268   ins_cost(VOLATILE_REF_COST);
7269   format %{ "ldarb  $dst, $mem\t# byte" %}
7270 
7271   ins_encode(aarch64_enc_ldarb(dst, mem));
7272 
7273   ins_pipe(pipe_serial);
7274 %}
7275 
7276 // Load Byte (8 bit unsigned) into long
7277 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7278 %{
7279   match(Set dst (ConvI2L (LoadUB mem)));
7280 
7281   ins_cost(VOLATILE_REF_COST);
7282   format %{ "ldarb  $dst, $mem\t# byte" %}
7283 
7284   ins_encode(aarch64_enc_ldarb(dst, mem));
7285 
7286   ins_pipe(pipe_serial);
7287 %}
7288 
7289 // Load Short (16 bit signed)
7290 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7291 %{
7292   match(Set dst (LoadS mem));
7293 
7294   ins_cost(VOLATILE_REF_COST);
7295   format %{ "ldarshw  $dst, $mem\t# short" %}
7296 
7297   ins_encode(aarch64_enc_ldarshw(dst, mem));
7298 
7299   ins_pipe(pipe_serial);
7300 %}
7301 
7302 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7303 %{
7304   match(Set dst (LoadUS mem));
7305 
7306   ins_cost(VOLATILE_REF_COST);
7307   format %{ "ldarhw  $dst, $mem\t# short" %}
7308 
7309   ins_encode(aarch64_enc_ldarhw(dst, mem));
7310 
7311   ins_pipe(pipe_serial);
7312 %}
7313 
7314 // Load Short/Char (16 bit unsigned) into long
7315 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7316 %{
7317   match(Set dst (ConvI2L (LoadUS mem)));
7318 
7319   ins_cost(VOLATILE_REF_COST);
7320   format %{ "ldarh  $dst, $mem\t# short" %}
7321 
7322   ins_encode(aarch64_enc_ldarh(dst, mem));
7323 
7324   ins_pipe(pipe_serial);
7325 %}
7326 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed annotation: format showed "ldarh" but the encoding emits
  // ldarsh (sign-extending acquire load, matching the signed LoadS).
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7339 
7340 // Load Integer (32 bit signed)
7341 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7342 %{
7343   match(Set dst (LoadI mem));
7344 
7345   ins_cost(VOLATILE_REF_COST);
7346   format %{ "ldarw  $dst, $mem\t# int" %}
7347 
7348   ins_encode(aarch64_enc_ldarw(dst, mem));
7349 
7350   ins_pipe(pipe_serial);
7351 %}
7352 
7353 // Load Integer (32 bit unsigned) into long
7354 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7355 %{
7356   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7357 
7358   ins_cost(VOLATILE_REF_COST);
7359   format %{ "ldarw  $dst, $mem\t# int" %}
7360 
7361   ins_encode(aarch64_enc_ldarw(dst, mem));
7362 
7363   ins_pipe(pipe_serial);
7364 %}
7365 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed annotation: was "# int"; this is a 64-bit acquiring long load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7378 
7379 // Load Pointer
7380 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7381 %{
7382   match(Set dst (LoadP mem));
7383 
7384   ins_cost(VOLATILE_REF_COST);
7385   format %{ "ldar  $dst, $mem\t# ptr" %}
7386 
7387   ins_encode(aarch64_enc_ldar(dst, mem));
7388 
7389   ins_pipe(pipe_serial);
7390 %}
7391 
7392 // Load Compressed Pointer
7393 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7394 %{
7395   match(Set dst (LoadN mem));
7396 
7397   ins_cost(VOLATILE_REF_COST);
7398   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7399 
7400   ins_encode(aarch64_enc_ldarw(dst, mem));
7401 
7402   ins_pipe(pipe_serial);
7403 %}
7404 
7405 // Load Float
7406 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7407 %{
7408   match(Set dst (LoadF mem));
7409 
7410   ins_cost(VOLATILE_REF_COST);
7411   format %{ "ldars  $dst, $mem\t# float" %}
7412 
7413   ins_encode( aarch64_enc_fldars(dst, mem) );
7414 
7415   ins_pipe(pipe_serial);
7416 %}
7417 
7418 // Load Double
7419 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7420 %{
7421   match(Set dst (LoadD mem));
7422 
7423   ins_cost(VOLATILE_REF_COST);
7424   format %{ "ldard  $dst, $mem\t# double" %}
7425 
7426   ins_encode( aarch64_enc_fldard(dst, mem) );
7427 
7428   ins_pipe(pipe_serial);
7429 %}
7430 
7431 // Store Byte
7432 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7433 %{
7434   match(Set mem (StoreB mem src));
7435 
7436   ins_cost(VOLATILE_REF_COST);
7437   format %{ "stlrb  $src, $mem\t# byte" %}
7438 
7439   ins_encode(aarch64_enc_stlrb(src, mem));
7440 
7441   ins_pipe(pipe_class_memory);
7442 %}
7443 
7444 // Store Char/Short
7445 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7446 %{
7447   match(Set mem (StoreC mem src));
7448 
7449   ins_cost(VOLATILE_REF_COST);
7450   format %{ "stlrh  $src, $mem\t# short" %}
7451 
7452   ins_encode(aarch64_enc_stlrh(src, mem));
7453 
7454   ins_pipe(pipe_class_memory);
7455 %}
7456 
7457 // Store Integer
7458 
7459 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7460 %{
7461   match(Set mem(StoreI mem src));
7462 
7463   ins_cost(VOLATILE_REF_COST);
7464   format %{ "stlrw  $src, $mem\t# int" %}
7465 
7466   ins_encode(aarch64_enc_stlrw(src, mem));
7467 
7468   ins_pipe(pipe_class_memory);
7469 %}
7470 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed annotation: was "# int"; this is a 64-bit releasing long store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7483 
7484 // Store Pointer
7485 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7486 %{
7487   match(Set mem (StoreP mem src));
7488 
7489   ins_cost(VOLATILE_REF_COST);
7490   format %{ "stlr  $src, $mem\t# ptr" %}
7491 
7492   ins_encode(aarch64_enc_stlr(src, mem));
7493 
7494   ins_pipe(pipe_class_memory);
7495 %}
7496 
7497 // Store Compressed Pointer
7498 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7499 %{
7500   match(Set mem (StoreN mem src));
7501 
7502   ins_cost(VOLATILE_REF_COST);
7503   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7504 
7505   ins_encode(aarch64_enc_stlrw(src, mem));
7506 
7507   ins_pipe(pipe_class_memory);
7508 %}
7509 
7510 // Store Float
7511 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7512 %{
7513   match(Set mem (StoreF mem src));
7514 
7515   ins_cost(VOLATILE_REF_COST);
7516   format %{ "stlrs  $src, $mem\t# float" %}
7517 
7518   ins_encode( aarch64_enc_fstlrs(src, mem) );
7519 
7520   ins_pipe(pipe_class_memory);
7521 %}
7522 
7523 // TODO
7524 // implement storeImmF0 and storeFImmPacked
7525 
7526 // Store Double
7527 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7528 %{
7529   match(Set mem (StoreD mem src));
7530 
7531   ins_cost(VOLATILE_REF_COST);
7532   format %{ "stlrd  $src, $mem\t# double" %}
7533 
7534   ins_encode( aarch64_enc_fstlrd(src, mem) );
7535 
7536   ins_pipe(pipe_class_memory);
7537 %}
7538 
7539 //  ---------------- end of volatile loads and stores ----------------
7540 
7541 // ============================================================================
7542 // BSWAP Instructions
7543 
7544 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7545   match(Set dst (ReverseBytesI src));
7546 
7547   ins_cost(INSN_COST);
7548   format %{ "revw  $dst, $src" %}
7549 
7550   ins_encode %{
7551     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7552   %}
7553 
7554   ins_pipe(ialu_reg);
7555 %}
7556 
7557 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7558   match(Set dst (ReverseBytesL src));
7559 
7560   ins_cost(INSN_COST);
7561   format %{ "rev  $dst, $src" %}
7562 
7563   ins_encode %{
7564     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7565   %}
7566 
7567   ins_pipe(ialu_reg);
7568 %}
7569 
7570 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7571   match(Set dst (ReverseBytesUS src));
7572 
7573   ins_cost(INSN_COST);
7574   format %{ "rev16w  $dst, $src" %}
7575 
7576   ins_encode %{
7577     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7578   %}
7579 
7580   ins_pipe(ialu_reg);
7581 %}
7582 
7583 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7584   match(Set dst (ReverseBytesS src));
7585 
7586   ins_cost(INSN_COST);
7587   format %{ "rev16w  $dst, $src\n\t"
7588             "sbfmw $dst, $dst, #0, #15" %}
7589 
7590   ins_encode %{
7591     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7592     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7593   %}
7594 
7595   ins_pipe(ialu_reg);
7596 %}
7597 
7598 // ============================================================================
7599 // Zero Count Instructions
7600 
7601 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7602   match(Set dst (CountLeadingZerosI src));
7603 
7604   ins_cost(INSN_COST);
7605   format %{ "clzw  $dst, $src" %}
7606   ins_encode %{
7607     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7608   %}
7609 
7610   ins_pipe(ialu_reg);
7611 %}
7612 
7613 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7614   match(Set dst (CountLeadingZerosL src));
7615 
7616   ins_cost(INSN_COST);
7617   format %{ "clz   $dst, $src" %}
7618   ins_encode %{
7619     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7620   %}
7621 
7622   ins_pipe(ialu_reg);
7623 %}
7624 
7625 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7626   match(Set dst (CountTrailingZerosI src));
7627 
7628   ins_cost(INSN_COST * 2);
7629   format %{ "rbitw  $dst, $src\n\t"
7630             "clzw   $dst, $dst" %}
7631   ins_encode %{
7632     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7633     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7634   %}
7635 
7636   ins_pipe(ialu_reg);
7637 %}
7638 
7639 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7640   match(Set dst (CountTrailingZerosL src));
7641 
7642   ins_cost(INSN_COST * 2);
7643   format %{ "rbit   $dst, $src\n\t"
7644             "clz    $dst, $dst" %}
7645   ins_encode %{
7646     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7647     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7648   %}
7649 
7650   ins_pipe(ialu_reg);
7651 %}
7652 
7653 //---------- Population Count Instructions -------------------------------------
7654 //
7655 
// Population count (int): zero-extend src, move to a vector register,
// cnt the 8 bytes, sum them with addv, and move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (zero-extension in place) but the
    // effect() clause does not declare src as modified — presumably safe
    // because only the upper 32 bits change, which are undefined for an
    // int register; TODO confirm against register-allocator assumptions.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7677 
7678 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
7679   predicate(UsePopCountInstruction);
7680   match(Set dst (PopCountI (LoadI mem)));
7681   effect(TEMP tmp);
7682   ins_cost(INSN_COST * 13);
7683 
7684   format %{ "ldrs   $tmp, $mem\n\t"
7685             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7686             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7687             "mov    $dst, $tmp\t# vector (1D)" %}
7688   ins_encode %{
7689     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7690     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
7691                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7692     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7693     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7694     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7695   %}
7696 
7697   ins_pipe(pipe_class_default);
7698 %}
7699 
7700 // Note: Long.bitCount(long) returns an int.
7701 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7702   predicate(UsePopCountInstruction);
7703   match(Set dst (PopCountL src));
7704   effect(TEMP tmp);
7705   ins_cost(INSN_COST * 13);
7706 
7707   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
7708             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7709             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7710             "mov    $dst, $tmp\t# vector (1D)" %}
7711   ins_encode %{
7712     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
7713     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7714     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7715     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7716   %}
7717 
7718   ins_pipe(pipe_class_default);
7719 %}
7720 
7721 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
7722   predicate(UsePopCountInstruction);
7723   match(Set dst (PopCountL (LoadL mem)));
7724   effect(TEMP tmp);
7725   ins_cost(INSN_COST * 13);
7726 
7727   format %{ "ldrd   $tmp, $mem\n\t"
7728             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7729             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7730             "mov    $dst, $tmp\t# vector (1D)" %}
7731   ins_encode %{
7732     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7733     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
7734                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7735     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7736     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7737     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7738   %}
7739 
7740   ins_pipe(pipe_class_default);
7741 %}
7742 
7743 // ============================================================================
7744 // MemBar Instruction
7745 
7746 instruct load_fence() %{
7747   match(LoadFence);
7748   ins_cost(VOLATILE_REF_COST);
7749 
7750   format %{ "load_fence" %}
7751 
7752   ins_encode %{
7753     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7754   %}
7755   ins_pipe(pipe_serial);
7756 %}
7757 
7758 instruct unnecessary_membar_acquire() %{
7759   predicate(unnecessary_acquire(n));
7760   match(MemBarAcquire);
7761   ins_cost(0);
7762 
7763   format %{ "membar_acquire (elided)" %}
7764 
7765   ins_encode %{
7766     __ block_comment("membar_acquire (elided)");
7767   %}
7768 
7769   ins_pipe(pipe_class_empty);
7770 %}
7771 
7772 instruct membar_acquire() %{
7773   match(MemBarAcquire);
7774   ins_cost(VOLATILE_REF_COST);
7775 
7776   format %{ "membar_acquire\n\t"
7777             "dmb ish" %}
7778 
7779   ins_encode %{
7780     __ block_comment("membar_acquire");
7781     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7782   %}
7783 
7784   ins_pipe(pipe_serial);
7785 %}
7786 
7787 
7788 instruct membar_acquire_lock() %{
7789   match(MemBarAcquireLock);
7790   ins_cost(VOLATILE_REF_COST);
7791 
7792   format %{ "membar_acquire_lock (elided)" %}
7793 
7794   ins_encode %{
7795     __ block_comment("membar_acquire_lock (elided)");
7796   %}
7797 
7798   ins_pipe(pipe_serial);
7799 %}
7800 
7801 instruct store_fence() %{
7802   match(StoreFence);
7803   ins_cost(VOLATILE_REF_COST);
7804 
7805   format %{ "store_fence" %}
7806 
7807   ins_encode %{
7808     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7809   %}
7810   ins_pipe(pipe_serial);
7811 %}
7812 
7813 instruct unnecessary_membar_release() %{
7814   predicate(unnecessary_release(n));
7815   match(MemBarRelease);
7816   ins_cost(0);
7817 
7818   format %{ "membar_release (elided)" %}
7819 
7820   ins_encode %{
7821     __ block_comment("membar_release (elided)");
7822   %}
7823   ins_pipe(pipe_serial);
7824 %}
7825 
7826 instruct membar_release() %{
7827   match(MemBarRelease);
7828   ins_cost(VOLATILE_REF_COST);
7829 
7830   format %{ "membar_release\n\t"
7831             "dmb ish" %}
7832 
7833   ins_encode %{
7834     __ block_comment("membar_release");
7835     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7836   %}
7837   ins_pipe(pipe_serial);
7838 %}
7839 
7840 instruct membar_storestore() %{
7841   match(MemBarStoreStore);
7842   ins_cost(VOLATILE_REF_COST);
7843 
7844   format %{ "MEMBAR-store-store" %}
7845 
7846   ins_encode %{
7847     __ membar(Assembler::StoreStore);
7848   %}
7849   ins_pipe(pipe_serial);
7850 %}
7851 
7852 instruct membar_release_lock() %{
7853   match(MemBarReleaseLock);
7854   ins_cost(VOLATILE_REF_COST);
7855 
7856   format %{ "membar_release_lock (elided)" %}
7857 
7858   ins_encode %{
7859     __ block_comment("membar_release_lock (elided)");
7860   %}
7861 
7862   ins_pipe(pipe_serial);
7863 %}
7864 
7865 instruct unnecessary_membar_volatile() %{
7866   predicate(unnecessary_volatile(n));
7867   match(MemBarVolatile);
7868   ins_cost(0);
7869 
7870   format %{ "membar_volatile (elided)" %}
7871 
7872   ins_encode %{
7873     __ block_comment("membar_volatile (elided)");
7874   %}
7875 
7876   ins_pipe(pipe_serial);
7877 %}
7878 
7879 instruct membar_volatile() %{
7880   match(MemBarVolatile);
7881   ins_cost(VOLATILE_REF_COST*100);
7882 
7883   format %{ "membar_volatile\n\t"
7884              "dmb ish"%}
7885 
7886   ins_encode %{
7887     __ block_comment("membar_volatile");
7888     __ membar(Assembler::StoreLoad);
7889   %}
7890 
7891   ins_pipe(pipe_serial);
7892 %}
7893 
7894 // ============================================================================
7895 // Cast/Convert Instructions
7896 
7897 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7898   match(Set dst (CastX2P src));
7899 
7900   ins_cost(INSN_COST);
7901   format %{ "mov $dst, $src\t# long -> ptr" %}
7902 
7903   ins_encode %{
7904     if ($dst$$reg != $src$$reg) {
7905       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7906     }
7907   %}
7908 
7909   ins_pipe(ialu_reg);
7910 %}
7911 
7912 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7913   match(Set dst (CastP2X src));
7914 
7915   ins_cost(INSN_COST);
7916   format %{ "mov $dst, $src\t# ptr -> long" %}
7917 
7918   ins_encode %{
7919     if ($dst$$reg != $src$$reg) {
7920       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7921     }
7922   %}
7923 
7924   ins_pipe(ialu_reg);
7925 %}
7926 
7927 // Convert oop into int for vectors alignment masking
7928 instruct convP2I(iRegINoSp dst, iRegP src) %{
7929   match(Set dst (ConvL2I (CastP2X src)));
7930 
7931   ins_cost(INSN_COST);
7932   format %{ "movw $dst, $src\t# ptr -> int" %}
7933   ins_encode %{
7934     __ movw($dst$$Register, $src$$Register);
7935   %}
7936 
7937   ins_pipe(ialu_reg);
7938 %}
7939 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed annotation: was "mov dst, $src" — the missing '$' meant the
  // destination operand was never substituted in printed assembly, and
  // the encoding emits movw, not mov.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7955 
7956 instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
7957   match(Set dst (ShenandoahReadBarrier src));
7958   format %{ "shenandoah_rb $dst,$src" %}
7959   ins_encode %{
7960 #if INCLUDE_SHENANDOAHGC
7961     Register s = $src$$Register;
7962     Register d = $dst$$Register;
7963     __ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
7964 #else
7965     ShouldNotReachHere();
7966 #endif
7967   %}
7968   ins_pipe(pipe_class_memory);
7969 %}
7970 
7971 
// Convert oop pointer into compressed form
// General case: the oop may be NULL (see the not-null variant below).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
7986 
// Compress an oop that the type system proves is never NULL.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  // NOTE(review): cr is declared but, unlike encodeHeapOop above, there
  // is no effect(KILL cr) -- confirm encode_heap_oop_not_null really
  // leaves the flags untouched.
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
7997 
// Expand a compressed oop to a full pointer.  General case: the value
// may be NULL and is not a constant (see the not-null variant below).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    __ decode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8011 
// Expand a compressed oop that is provably non-NULL (or a constant),
// allowing the decoder to skip the NULL check.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ decode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8025 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a (never-NULL) klass pointer.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    __ encode_klass_not_null(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8044 
// Expand a compressed (never-NULL) klass pointer.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Two distinct macro-assembler entry points: the one-argument form
    // decodes in place when dst and src were allocated to the same
    // register; the two-argument form is used otherwise.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8063 
// CheckCastPP is a type-system-only node: it narrows the compile-time
// type of the value already in $dst and emits no code (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8073 
// CastPP is likewise a no-op at the machine level: the pointer stays in
// the same register, only its ideal type changes.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
8083 
// CastII pins/narrows an int value's ideal type without emitting code.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8094 
8095 // ============================================================================
8096 // Atomic operation instructions
8097 //
8098 // Intel and SPARC both implement Ideal Node LoadPLocked and
8099 // Store{PIL}Conditional instructions using a normal load for the
8100 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8101 //
8102 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8103 // pair to lock object allocations from Eden space when not using
8104 // TLABs.
8105 //
8106 // There does not appear to be a Load{IL}Locked Ideal Node and the
8107 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8108 // and to use StoreIConditional only for 32-bit and StoreLConditional
8109 // only for 64-bit.
8110 //
8111 // We implement LoadPLocked and StorePLocked instructions using,
8112 // respectively the AArch64 hw load-exclusive and store-conditional
8113 // instructions. Whereas we must implement each of
8114 // Store{IL}Conditional using a CAS which employs a pair of
8115 // instructions comprising a load-exclusive followed by a
8116 // store-conditional.
8117 
8118 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64

// Pairs with storePConditional below: ldaxr takes the exclusive monitor
// (with acquire semantics) that the later stlxr tests.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8135 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // n.b. oldval is not passed to the encoding: the stlxr only succeeds
  // if this CPU still holds the exclusive monitor taken by the
  // preceding loadPLocked.
  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8200 
// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// All of these produce res = 1 on a successful exchange and 0 otherwise:
// the cmpxchg encoding leaves EQ set on success and cset materialises it.

// byte CAS
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// short (halfword) CAS
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// int (word) CAS
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// long (doubleword) CAS
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// pointer CAS
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// pointer CAS with Shenandoah barrier; needs a TEMP so oldval survives
// the barrier's internal use of the compare register.
instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// narrow-oop CAS
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// narrow-oop CAS with Shenandoah barrier
instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchgw_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    // Copy before the barrier call so oldval itself is never clobbered.
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
    __ cset($res$$Register, Assembler::EQ);
#else
    ShouldNotReachHere();
#endif
  %}

  ins_pipe(pipe_slow);
%}
8358 
// alternative CompareAndSwapX when we are eliding barriers

// These are selected (via needs_acquiring_load_exclusive) when the CAS
// itself must provide acquire semantics; note the lower ins_cost than
// the barriered rules above, which biases the matcher towards them.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// acquiring pointer CAS with Shenandoah barrier
instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// acquiring narrow-oop CAS with Shenandoah barrier
instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

 format %{
    "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

  ins_encode %{
    // Copy before the barrier call so oldval itself is never clobbered.
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
    __ cset($res$$Register, Assembler::EQ);
#else
    ShouldNotReachHere();
#endif
  %}

  ins_pipe(pipe_slow);
%}
8520 
8521 // ---------------------------------------------------------------------
8522 
8523 
8524 // BEGIN This section of the file is automatically generated. Do not edit --------------
8525 
8526 // Sundry CAS operations.  Note that release is always true,
8527 // regardless of the memory ordering of the CAS.  This is because we
8528 // need the volatile case to be sequentially consistent but there is
8529 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8530 // can't check the type of memory ordering here, so we always emit a
8531 // STLXR.
8532 
8533 // This section is generated from aarch64_ad_cas.m4
8534 
8535 
8536 
8537 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8538   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
8539   ins_cost(2 * VOLATILE_REF_COST);
8540   effect(TEMP_DEF res, KILL cr);
8541   format %{
8542     "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8543   %}
8544   ins_encode %{
8545     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8546                Assembler::byte, /*acquire*/ false, /*release*/ true,
8547                /*weak*/ false, $res$$Register);
8548     __ sxtbw($res$$Register, $res$$Register);
8549   %}
8550   ins_pipe(pipe_slow);
8551 %}
8552 
8553 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8554   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
8555   ins_cost(2 * VOLATILE_REF_COST);
8556   effect(TEMP_DEF res, KILL cr);
8557   format %{
8558     "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
8559   %}
8560   ins_encode %{
8561     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8562                Assembler::halfword, /*acquire*/ false, /*release*/ true,
8563                /*weak*/ false, $res$$Register);
8564     __ sxthw($res$$Register, $res$$Register);
8565   %}
8566   ins_pipe(pipe_slow);
8567 %}
8568 
8569 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8570   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8571   ins_cost(2 * VOLATILE_REF_COST);
8572   effect(TEMP_DEF res, KILL cr);
8573   format %{
8574     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
8575   %}
8576   ins_encode %{
8577     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8578                Assembler::word, /*acquire*/ false, /*release*/ true,
8579                /*weak*/ false, $res$$Register);
8580   %}
8581   ins_pipe(pipe_slow);
8582 %}
8583 
8584 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8585   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8586   ins_cost(2 * VOLATILE_REF_COST);
8587   effect(TEMP_DEF res, KILL cr);
8588   format %{
8589     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
8590   %}
8591   ins_encode %{
8592     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8593                Assembler::xword, /*acquire*/ false, /*release*/ true,
8594                /*weak*/ false, $res$$Register);
8595   %}
8596   ins_pipe(pipe_slow);
8597 %}
8598 
8599 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8600   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8601   ins_cost(2 * VOLATILE_REF_COST);
8602   effect(TEMP_DEF res, KILL cr);
8603   format %{
8604     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8605   %}
8606   ins_encode %{
8607     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8608                Assembler::word, /*acquire*/ false, /*release*/ true,
8609                /*weak*/ false, $res$$Register);
8610   %}
8611   ins_pipe(pipe_slow);
8612 %}
8613 
8614 instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
8615   match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
8616   ins_cost(2 * VOLATILE_REF_COST);
8617   effect(TEMP_DEF res, TEMP tmp, KILL cr);
8618   format %{
8619     "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8620   %}
8621   ins_encode %{
8622     Register tmp = $tmp$$Register;
8623     __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
8624 #if INCLUDE_SHENANDOAHGC
8625     ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
8626                                                    /*acquire*/ false, /*release*/ true, /*weak*/ false, /* encode*/ false, noreg, noreg, rscratch2, $res$$Register);
8627 #else
8628     ShouldNotReachHere();
8629 #endif
8630   %}
8631   ins_pipe(pipe_slow);
8632 %}
8633 
8634 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8635   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
8636   ins_cost(2 * VOLATILE_REF_COST);
8637   effect(TEMP_DEF res, KILL cr);
8638   format %{
8639     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
8640   %}
8641   ins_encode %{
8642     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8643                Assembler::xword, /*acquire*/ false, /*release*/ true,
8644                /*weak*/ false, $res$$Register);
8645   %}
8646   ins_pipe(pipe_slow);
8647 %}
8648 
8649 instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
8650   match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
8651   ins_cost(2 * VOLATILE_REF_COST);
8652   effect(TEMP_DEF res, TEMP tmp, KILL cr);
8653   format %{
8654     "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
8655   %}
8656   ins_encode %{
8657     Register tmp = $tmp$$Register;
8658     __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
8659 #if INCLUDE_SHENANDOAHGC
8660     ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
8661                                                    /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
8662 #else
8663     ShouldNotReachHere();
8664 #endif
8665   %}
8666   ins_pipe(pipe_slow);
8667 %}
8668 
// Acquiring variants of compareAndExchange, selected when the node
// needs an acquiring load-exclusive (note the lower ins_cost).
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the fetched byte into the int-typed result.
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    // Sign-extend the fetched halfword into the int-typed result.
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /* encode*/ false, noreg, noreg, rscratch2, $res$$Register);
#else
    ShouldNotReachHere();
#endif
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchg_acq_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
#else
    ShouldNotReachHere();
#endif
  %}
  ins_pipe(pipe_slow);
%}
8810 
// ---- Weak compare-and-swap family (relaxed: no acquire on the load) ----
// A weak CAS is allowed to fail spuriously, so the encoding passes
// /*weak*/ true and emits no retry loop.  The CAS result register is
// noreg: success is taken from the flags, and $res is materialized as
// 1/0 from the EQ condition with csetw.

// Weak CAS, byte.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, short (halfword).
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, int (word).
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, long (xword).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS, narrow oop (word-sized compressed pointer).
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8895 
// Shenandoah weak CAS, narrow oop (relaxed).  $oldval is preserved by
// copying it to the TEMP register before the barrier-aware CAS; $res is
// set to 1/0 from the EQ condition.
instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
#else
    // Shenandoah ideal nodes cannot occur when the GC is compiled out.
    ShouldNotReachHere();
#endif
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8917 
// Weak CAS, pointer (xword, relaxed).  Success flag (1/0) in $res.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8934 
// Shenandoah weak CAS, pointer (relaxed).  $oldval is preserved via the
// TEMP register; $res is set to 1/0 from the EQ condition.
instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchg_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
#else
    // Shenandoah ideal nodes cannot occur when the GC is compiled out.
    ShouldNotReachHere();
#endif
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8956 
// ---- Acquiring weak compare-and-swap family ----
// Same as the rules above but with /*acquire*/ true, matched only when
// needs_acquiring_load_exclusive(n) holds; the lower VOLATILE_REF_COST
// makes the matcher prefer these forms in that case.

// Weak CAS with acquire, byte.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS with acquire, short (halfword).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS with acquire, int (word).
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS with acquire, long (xword).
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// Weak CAS with acquire, narrow oop (word).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9046 
// Shenandoah weak CAS with acquire, narrow oop.  $oldval is preserved
// via the TEMP register; $res is set to 1/0 from the EQ condition.
instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
#else
    // Shenandoah ideal nodes cannot occur when the GC is compiled out.
    ShouldNotReachHere();
#endif
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9069 
// Weak CAS with acquire, pointer (xword).  Success flag (1/0) in $res.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9087 
// Shenandoah weak CAS with acquire, pointer.  $oldval is preserved via
// the TEMP register; $res is set to 1/0 from the EQ condition.
instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
#if INCLUDE_SHENANDOAHGC
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
#else
    // Shenandoah ideal nodes cannot occur when the GC is compiled out.
    ShouldNotReachHere();
#endif
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9110 
9111 // END This section of the file is automatically generated. Do not edit --------------
9112 // ---------------------------------------------------------------------
9113 
// ---- Atomic exchange (GetAndSet) family, relaxed ordering ----
// $prev receives the previous memory value; the new value $newv is
// stored at the address in $mem's base register.

// Atomic exchange, int (word).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, long (xword).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, narrow oop (word).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, pointer (xword).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9153 
// ---- Atomic exchange (GetAndSet) family, acquiring variants ----
// Matched only when needs_acquiring_load_exclusive(n) holds; uses the
// *al forms of atomic_xchg (acquire) and carries a lower cost so the
// matcher prefers these rules in that case.

// Acquiring atomic exchange, int (word).
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange, long (xword).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange, narrow oop (word).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring atomic exchange, pointer (xword).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9197 
9198 
// ---- Atomic fetch-and-add (GetAndAdd) family, relaxed ordering ----
// $newval is the rule's result register for the GetAndAdd node (it
// receives whatever MacroAssembler::atomic_add* returns -- presumably
// the prior memory value; confirm against macroAssembler_aarch64).
// The _no_res variants match when the node's result is unused
// (result_not_used predicate) and pass noreg, at one cost unit less.
// The *i variants fold an add/sub-encodable immediate increment.

// Fetch-and-add, long, register increment.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, register increment, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9282 
// ---- Atomic fetch-and-add (GetAndAdd) family, acquiring variants ----
// Matched only when needs_acquiring_load_exclusive(n) holds; uses the
// *al forms of atomic_add (acquire) and carries a lower cost so the
// matcher prefers these rules in that case.

// Acquiring fetch-and-add, long, register increment.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, long, register increment, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, long, immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, long, immediate increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, int, register increment.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, int, register increment, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, int, immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add, int, immediate increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9370 
// Manifest a CmpL result in an integer register:
// dst = (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // Set flags from the 64-bit compare, then materialize the result:
    //   dst = (src1 != src2) ? 1 : 0;   // csetw NE
    //   dst = (src1 <  src2) ? -dst : dst;  // cnegw LT => -1/0/1
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9393 
// Manifest a CmpL-against-constant result in an integer register:
// dst = (src1 < con) ? -1 : ((src1 > con) ? 1 : 0)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // Set the flags by subtracting the constant into zr (cmp), adding the
    // negation instead when the constant is negative.  The immLAddSub
    // operand admits only add/sub-encodable immediates, so -con cannot
    // overflow here (con is never INT_MIN) -- confirm against the
    // immLAddSub predicate if that operand is ever widened.
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // Materialize -1/0/1 from the flags, as in cmpL3_reg_reg.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9418 
9419 // ============================================================================
9420 // Conditional Move Instructions
9421 
9422 // n.b. we have identical rules for both a signed compare op (cmpOp)
9423 // and an unsigned compare op (cmpOpU). it would be nice if we could
9424 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
9430 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9431 
// Conditional move, int, signed compare: dst = cmp ? src2 : src1
// (cselw picks $src2 when the condition holds, else $src1).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, int, unsigned compare flavour (see note above about
// why cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9463 
// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Conditional move, int, signed: dst = cmp ? src : 0 (zr as the
// not-taken source avoids materializing the constant zero).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Conditional move, int, signed: dst = cmp ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9536 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// dst = cmp ? 1 : 0, via csincw dst, zr, zr, cond: yields zr when the
// condition holds and zr+1 otherwise -- note the condition therefore
// selects the ZERO outcome (see the cset equivalence below).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9579 
// Conditional move, long, signed compare: dst = cmp ? src2 : src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9611 
9612 // special cases where one arg is zero
9613 
9614 instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9615   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9616 
9617   ins_cost(INSN_COST * 2);
9618   format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}
9619 
9620   ins_encode %{
9621     __ csel(as_Register($dst$$reg),
9622             zr,
9623             as_Register($src$$reg),
9624             (Assembler::Condition)$cmp$$cmpcode);
9625   %}
9626 
9627   ins_pipe(icond_reg);
9628 %}
9629 
9630 instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9631   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9632 
9633   ins_cost(INSN_COST * 2);
9634   format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}
9635 
9636   ins_encode %{
9637     __ csel(as_Register($dst$$reg),
9638             zr,
9639             as_Register($src$$reg),
9640             (Assembler::Condition)$cmp$$cmpcode);
9641   %}
9642 
9643   ins_pipe(icond_reg);
9644 %}
9645 
9646 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9647   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9648 
9649   ins_cost(INSN_COST * 2);
9650   format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
9651 
9652   ins_encode %{
9653     __ csel(as_Register($dst$$reg),
9654             as_Register($src$$reg),
9655             zr,
9656             (Assembler::Condition)$cmp$$cmpcode);
9657   %}
9658 
9659   ins_pipe(icond_reg);
9660 %}
9661 
9662 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9663   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9664 
9665   ins_cost(INSN_COST * 2);
9666   format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
9667 
9668   ins_encode %{
9669     __ csel(as_Register($dst$$reg),
9670             as_Register($src$$reg),
9671             zr,
9672             (Assembler::Condition)$cmp$$cmpcode);
9673   %}
9674 
9675   ins_pipe(icond_reg);
9676 %}
9677 
// Conditional move for pointers; same csel scheme as the long rules:
// dst = cond ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null as the selected-true value: use zr, avoiding a constant load.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null as the selected-false value.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9775 
// Conditional move for compressed (narrow) pointers: 32-bit cselw,
// dst = cond ? src2 : src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9791 
// Unsigned-comparison variant of cmovN_reg_reg.
// Fixed: the format comment previously said "signed" although this is
// the unsigned (cmpOpU/rFlagsRegU) rule.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9807 
// special cases where one arg is zero

// Narrow null as the selected-true value: use zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Narrow null as the selected-false value.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9873 
// Conditional move for floats.  fcsel Rd, Rn, Rm, cond computes
// Rd = cond ? Rn : Rm, and the encoding passes src2 first so that
// dst = cond ? src2 : src1, matching the integer cmov rules above.
// Fixed: the format strings previously printed "$src1, $src2",
// reversing the operand order actually emitted, and the double rules
// were labelled "cmove float".
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src2, $src1, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src2, $src1, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// Conditional move for doubles (fcseld).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src2, $src1, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}

// Unsigned-comparison variant of the rule above.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src2, $src1, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9945 
9946 // ============================================================================
9947 // Arithmetic Instructions
9948 //
9949 
9950 // Integer Addition
9951 
9952 // TODO
9953 // these currently employ operations which do not set CR and hence are
9954 // not flagged as killing CR but we would like to isolate the cases
9955 // where we want to set flags from those where we don't. need to work
9956 // out how to do that.
9957 
// Integer register-register add (32-bit addw).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer add of an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above but with the register operand coming from a long narrowed
// by ConvL2I (the 32-bit addw reads only the low word).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10000 
10001 // Pointer Addition
// Pointer plus long offset (64-bit add).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus int offset: the ConvI2L is folded into the add's
// sign-extending (sxtw) operand form.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus scaled long index, folded into a single lea with a
// shifted register address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus scaled int index: ConvI2L and shift both folded into a
// single lea with a sign-extended, scaled register address.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10061 
// Left shift of an int widened to long, done in one sbfiz: insert a
// sign-extended bitfield at bit ($scale & 63).  The field width is
// capped at 32 bits since the source is a 32-bit int.
// NOTE(review): cr appears in the operand list but sbfiz does not set
// flags — consistent with the TODO about flag-kill tracking below.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10076 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10093 
// Long Addition

// Long register-register add (64-bit add).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10110 
// Long Immediate Addition. No constant pool entries required.
// Long add of an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10125 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10156 
// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10173 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtract of an add/sub-encodable immediate.
// Fixed: the format string was missing the space after "sub".
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10188 
// Integer Negation (special case for sub)

instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10220 
// Integer Multiply

instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Widening 32x32->64 multiply: both ConvI2L nodes fold into smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10252 
// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10269 
// High 64 bits of the 128-bit signed product (smulh).
// Fixed: the format string had a stray ", " before the tab.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10285 
// Combined Integer Multiply & Add/Sub

// Fuse add-of-multiply into one maddw.
// Fixed: the format strings in these three int rules previously printed
// the 64-bit mnemonics (madd/msub/mneg) although the encodings emit the
// 32-bit forms (maddw/msubw/mnegw).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Fuse subtract-of-multiply into one msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// Combined Integer Multiply & Neg

// Fold a negation of either multiplicand into mnegw.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10337 
// Combined Long Multiply & Add/Sub

// Fuse add-of-multiply into one madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fuse subtract-of-multiply into one msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined Long Multiply & Neg

// Fold a negation of either multiplicand into mneg.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10389 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src >> 31) >>> 31 extracts the sign bit; emitted as a single
// unsigned shift right by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit(src): the shifted operand folds into addw's shifted
// register form (part of the round-toward-zero divide-by-2 sequence).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10425 
// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// (src >> 63) >>> 63 extracts the sign bit; emitted as a single
// unsigned shift right by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10447 
// src + sign-bit(src): the shifted operand folds into add's shifted
// register form (long counterpart of div2Round above).
// Fixed: the format string omitted the "LSR" shift annotation that the
// int rule (div2Round) prints.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10461 
// Integer Remainder

// Remainder as divide plus multiply-subtract:
//   dst = src1 - (src1 / src2) * src2   (msub Ra - Rn*Rm form)
// Fixed: the format strings contained a spurious "(" after the msub
// mnemonic, and modL's first line lacked the "\t" continuation indent.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// Long Remainder

instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10487 
// Integer Shifts
// Immediate counts are masked with & 0x1f, matching Java's int shift
// semantics (only the low 5 bits of the count are used).

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10585 
10586 // Combined Int Mask and Right Shift (using UBFM)
10587 // TODO
10588 
10589 // Long Shifts
10590 
10591 // Shift Left Register
10592 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10593   match(Set dst (LShiftL src1 src2));
10594 
10595   ins_cost(INSN_COST * 2);
10596   format %{ "lslv  $dst, $src1, $src2" %}
10597 
10598   ins_encode %{
10599     __ lslv(as_Register($dst$$reg),
10600             as_Register($src1$$reg),
10601             as_Register($src2$$reg));
10602   %}
10603 
10604   ins_pipe(ialu_reg_reg_vshift);
10605 %}
10606 
10607 // Shift Left Immediate
10608 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10609   match(Set dst (LShiftL src1 src2));
10610 
10611   ins_cost(INSN_COST);
10612   format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
10613 
10614   ins_encode %{
10615     __ lsl(as_Register($dst$$reg),
10616             as_Register($src1$$reg),
10617             $src2$$constant & 0x3f);
10618   %}
10619 
10620   ins_pipe(ialu_reg_shift);
10621 %}
10622 
10623 // Shift Right Logical Register
// dst = src1 >>> src2: 64-bit logical right shift by a register-held
// count (LSRV uses only the low 6 bits of the count register).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10638 
10639 // Shift Right Logical Immediate
// dst = src1 >>> (src2 & 0x3f): 64-bit logical right shift by an
// immediate; only the low 6 bits of the count are used.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // Mask the count to 6 bits; lsr encodes only a 0..63 shift.
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10654 
10655 // A special-case pattern for card table stores.
// dst = (long)src1 >>> (src2 & 0x3f): unsigned right shift of a
// pointer's bit pattern (CastP2X reinterprets the pointer as a long).
// This shape is produced by card table barrier code, which shifts an
// oop address to index the card table; it needs no extra instruction
// for the cast, just an LSR of the pointer register.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10670 
10671 // Shift Right Arithmetic Register
// dst = src1 >> src2: 64-bit arithmetic right shift by a register-held
// count (ASRV uses only the low 6 bits of the count register).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10686 
10687 // Shift Right Arithmetic Immediate
// dst = src1 >> (src2 & 0x3f): 64-bit arithmetic right shift by an
// immediate; only the low 6 bits of the count are used.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // Mask the count to 6 bits; asr encodes only a 0..63 shift.
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10702 
10703 // BEGIN This section of the file is automatically generated. Do not edit --------------
10704 
// NOTE(review): generated section — persist comment/format changes in
// the generator, not by hand-editing here.
// dst = ~src1 (64-bit): XorL with the constant -1 is bitwise NOT,
// emitted as EON (EOR-NOT) against the zero register so no separate
// immediate materialization is needed.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (32-bit variant of the rule above).
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10737 
// Fused "op with complemented operand" rules: AND/OR/XOR where one
// input is (x ^ -1), i.e. ~x, map onto the BIC/ORN/EON instruction
// forms so the NOT costs no extra instruction.

// dst = src1 & ~src2 (32-bit) -> bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (64-bit) -> bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (32-bit) -> ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (64-bit) -> orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = -1 ^ (src2 ^ src1) == src1 ^ ~src2 (32-bit) -> eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = -1 ^ (src2 ^ src1) == src1 ^ ~src2 (64-bit) -> eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10839 
// dst = src1 & ~(src2 SHIFT src3): BIC with the shifted-register form
// folds the shift, the NOT (XorI/XorL with -1) and the AND into one
// instruction.  The shift amount is masked to the operand width
// (0x1f for 32-bit, 0x3f for 64-bit).

// 32-bit, logical right shift (LSR).
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10947 
// dst = src1 ^ ~(src2 SHIFT src3): EON with the shifted-register form.
// The matched tree is (-1 ^ ((src2 SHIFT src3) ^ src1)), which is the
// canonical shape the matcher produces for this expression.

// 32-bit, logical right shift (LSR).
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11055 
// dst = src1 | ~(src2 SHIFT src3): ORN with the shifted-register form
// folds the shift, the NOT and the OR into one instruction.

// 32-bit, logical right shift (LSR).
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11163 
// dst = src1 & (src2 SHIFT src3): AND with a shifted-register operand,
// folding the shift into the logical instruction.  ("andr" is the
// assembler's name for the AND-register form.)

// 32-bit, logical right shift (LSR).
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11277 
// dst = src1 ^ (src2 SHIFT src3): EOR with a shifted-register operand,
// folding the shift into the logical instruction.

// 32-bit, logical right shift (LSR).
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11391 
// dst = src1 | (src2 SHIFT src3): ORR with a shifted-register operand,
// folding the shift into the logical instruction.

// 32-bit, logical right shift (LSR).
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11505 
// dst = src1 + (src2 SHIFT src3): ADD with a shifted-register operand,
// folding the shift into the arithmetic instruction.

// 32-bit, logical right shift (LSR).
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, logical right shift (LSR).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, arithmetic right shift (ASR).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, arithmetic right shift (ASR).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit, left shift (LSL).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit, left shift (LSL).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11619 
11620 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11621                          iRegIorL2I src1, iRegIorL2I src2,
11622                          immI src3, rFlagsReg cr) %{
11623   match(Set dst (SubI src1 (URShiftI src2 src3)));
11624 
11625   ins_cost(1.9 * INSN_COST);
11626   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11627 
11628   ins_encode %{
11629     __ subw(as_Register($dst$$reg),
11630               as_Register($src1$$reg),
11631               as_Register($src2$$reg),
11632               Assembler::LSR,
11633               $src3$$constant & 0x1f);
11634   %}
11635 
11636   ins_pipe(ialu_reg_reg_shift);
11637 %}
11638 
11639 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11640                          iRegL src1, iRegL src2,
11641                          immI src3, rFlagsReg cr) %{
11642   match(Set dst (SubL src1 (URShiftL src2 src3)));
11643 
11644   ins_cost(1.9 * INSN_COST);
11645   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11646 
11647   ins_encode %{
11648     __ sub(as_Register($dst$$reg),
11649               as_Register($src1$$reg),
11650               as_Register($src2$$reg),
11651               Assembler::LSR,
11652               $src3$$constant & 0x3f);
11653   %}
11654 
11655   ins_pipe(ialu_reg_reg_shift);
11656 %}
11657 
11658 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11659                          iRegIorL2I src1, iRegIorL2I src2,
11660                          immI src3, rFlagsReg cr) %{
11661   match(Set dst (SubI src1 (RShiftI src2 src3)));
11662 
11663   ins_cost(1.9 * INSN_COST);
11664   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11665 
11666   ins_encode %{
11667     __ subw(as_Register($dst$$reg),
11668               as_Register($src1$$reg),
11669               as_Register($src2$$reg),
11670               Assembler::ASR,
11671               $src3$$constant & 0x1f);
11672   %}
11673 
11674   ins_pipe(ialu_reg_reg_shift);
11675 %}
11676 
11677 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11678                          iRegL src1, iRegL src2,
11679                          immI src3, rFlagsReg cr) %{
11680   match(Set dst (SubL src1 (RShiftL src2 src3)));
11681 
11682   ins_cost(1.9 * INSN_COST);
11683   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11684 
11685   ins_encode %{
11686     __ sub(as_Register($dst$$reg),
11687               as_Register($src1$$reg),
11688               as_Register($src2$$reg),
11689               Assembler::ASR,
11690               $src3$$constant & 0x3f);
11691   %}
11692 
11693   ins_pipe(ialu_reg_reg_shift);
11694 %}
11695 
11696 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11697                          iRegIorL2I src1, iRegIorL2I src2,
11698                          immI src3, rFlagsReg cr) %{
11699   match(Set dst (SubI src1 (LShiftI src2 src3)));
11700 
11701   ins_cost(1.9 * INSN_COST);
11702   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11703 
11704   ins_encode %{
11705     __ subw(as_Register($dst$$reg),
11706               as_Register($src1$$reg),
11707               as_Register($src2$$reg),
11708               Assembler::LSL,
11709               $src3$$constant & 0x1f);
11710   %}
11711 
11712   ins_pipe(ialu_reg_reg_shift);
11713 %}
11714 
11715 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11716                          iRegL src1, iRegL src2,
11717                          immI src3, rFlagsReg cr) %{
11718   match(Set dst (SubL src1 (LShiftL src2 src3)));
11719 
11720   ins_cost(1.9 * INSN_COST);
11721   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11722 
11723   ins_encode %{
11724     __ sub(as_Register($dst$$reg),
11725               as_Register($src1$$reg),
11726               as_Register($src2$$reg),
11727               Assembler::LSL,
11728               $src3$$constant & 0x3f);
11729   %}
11730 
11731   ins_pipe(ialu_reg_reg_shift);
11732 %}
11733 
11734 
11735 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift is encoded as a single signed bitfield move:
// sbfm with rotate r = (rshift - lshift) & 63 and top bit index
// s = 63 - lshift.  The predicate guarantees both shift counts are in
// range for the 64-bit sbfm encoding; n->in(2) is the right-shift count
// and n->in(1)->in(2) the left-shift count.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: r and s are computed modulo 32 and the
// predicate bounds both shift counts by 31.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (src << lshift) >>> rshift becomes a
// single ubfm with the same r/s computation.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (src << lshift) >>> rshift via ubfmw.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask is 2^k - 1 (guaranteed by
// immI_bitmask), is a single unsigned bitfield extract of width k
// starting at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask+1 is a power of two, so exact_log2 yields the field width.
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of ubfxwI: (src >>> rshift) & mask as a single ubfx.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // NOTE(review): mask is a long here; exact_log2(mask+1) is assumed to
    // accept the full 64-bit range of immL_bitmask values -- verify, or
    // consider exact_log2_long as used in the ubfizL predicate below.
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11859 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The int result of (src >>> rshift) & mask is non-negative, so the
// ConvI2L is free: the 64-bit ubfx zero-extends the extracted field.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // Field width: mask is 2^width - 1 (immI_bitmask).
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11877 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift is a single unsigned bitfield insert-in-zero.
// The predicate checks that the shifted field still fits in 32 bits:
// field width (log2(mask+1)) plus the shift must not exceed 32.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI: (src & mask) << lshift as a single ubfiz.
// The predicate bounds width + shift by 64 (uses exact_log2_long for the
// long mask).
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // NOTE(review): the predicate uses exact_log2_long for this long mask
    // but the encoding uses exact_log2 -- presumably equivalent for the
    // accepted immL_bitmask range; verify.
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11916 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// The masked int value is non-negative (immI_bitmask), so ConvI2L is a
// zero-extension and the whole expression is still one ubfiz.  The
// predicate bounds field width + shift by 32 so the source field comes
// entirely from the int input.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11935 
// Rotations
//
// Each pattern recognizes (a << lshift) op (b >>> rshift) where the two
// constant shift counts sum to the register width (the predicate checks
// (lshift + rshift) & (width-1) == 0), which is exactly the extract
// instruction: extr dst, src1, src2, #rshift.  Both Or and Add are
// matched since the shifted halves have disjoint bits.

// long: (src1 << lshift) | (src2 >>> rshift), lshift + rshift == 64
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// int: (src1 << lshift) | (src2 >>> rshift), lshift + rshift == 32
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// long: same as extrOrL but with Add combining the disjoint halves
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// int: same as extrOrI but with Add combining the disjoint halves
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11997 
11998 
// rol expander
//
// Expand-only instruct (no match rule): used by the rol*_Var_* patterns
// below.  AArch64 has no rotate-left-by-register, so rotate-left by n is
// emitted as rotate-right by (64 - n), computed as (0 - n) via subw from
// zr; rorv only reads the low bits of the shift register, so the negation
// wraps correctly.  Clobbers rscratch1.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
//
// 32-bit variant of rolL_rReg (rorvw instead of rorv).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// long rotate left written as (x << s) | (x >>> (64 - s))
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// long rotate left written as (x << s) | (x >>> (0 - s)); equivalent to
// the c_64 form since shift counts are taken mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// int rotate left written as (x << s) | (x >>> (32 - s))
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// int rotate left written as (x << s) | (x >>> (0 - s))
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// ror expander
//
// Expand-only instruct: rotate right by register maps directly onto rorv.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
//
// 32-bit variant of rorL_rReg.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// long rotate right written as (x >>> s) | (x << (64 - s))
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// long rotate right written as (x >>> s) | (x << (0 - s))
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// int rotate right written as (x >>> s) | (x << (32 - s))
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// int rotate right written as (x >>> s) | (x << (0 - s))
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12132 
// Add/subtract (extended)
//
// Fold a ConvI2L of the second operand into the sxtw extended-register
// form of add/sub, so "long op sign-extended int" is one instruction.
// NOTE(review): cr is declared but unused by the encodings, matching the
// style of the neighbouring patterns.

// dst = src1 + (long)src2
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (long)src2
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12160 
12161 
// ---------------------------------------------------------------------------
// Add with an explicitly narrowed second operand.
//
// The idiom (src2 << k) >> k (signed right shift) sign-extends the low
// 32-k (or 64-k) bits of src2, and (src2 << k) >>> k zero-extends them.
// Each pattern folds that shift pair into add's extended-register form
// (sxtb/sxth/sxtw/uxtb) so the whole expression is a single instruction.
// ---------------------------------------------------------------------------

// int: dst = src1 + (short)src2   (k == 16)
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 + (byte)src2   (k == 24)
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 + (src2 & 0xff)   (unsigned right shift, k == 24)
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (short)src2   (k == 48)
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (int)src2   (k == 32)
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (byte)src2   (k == 56)
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xff)   (unsigned right shift, k == 56)
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12252 
12253 
// ---------------------------------------------------------------------------
// Add/Sub with a masked (zero-extended) second operand.
//
// Masking with 0xff / 0xffff / 0xffffffff is a zero extension, so each
// pattern folds the And into add/sub's uxtb/uxth/uxtw extended-register
// form, emitting a single instruction.
// ---------------------------------------------------------------------------

// int: dst = src1 + (src2 & 0xff)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 + (src2 & 0xffff)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xff)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xffff)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xffffffff)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 - (src2 & 0xff)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 - (src2 & 0xffff)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 - (src2 & 0xff)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 - (src2 & 0xffff)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 - (src2 & 0xffffffff)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12383 
12384 
// ---------------------------------------------------------------------------
// Add/Sub with a sign-extended, then left-shifted, second operand.
//
// The idiom ((src2 << k) >> k) << lshift2 is a sign extension followed by a
// small left shift; the extended-register form of add/sub takes both the
// extension kind and the shift amount (lshift2, range-limited by immIExt),
// so the whole expression is a single instruction.
// NOTE(review): the format strings below print "#lshift2" literally rather
// than substituting the operand (presumably $lshift2 was intended); this
// only affects debug/disassembly output, not code generation.
// ---------------------------------------------------------------------------

// long: dst = src1 + ((byte)src2 << lshift2)
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + ((short)src2 << lshift2)
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + ((int)src2 << lshift2)
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - ((byte)src2 << lshift2)
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - ((short)src2 << lshift2)
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - ((int)src2 << lshift2)
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 + ((byte)src2 << lshift2)
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12475 
12476 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12477 %{
12478   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12479   ins_cost(1.9 * INSN_COST);
12480   format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
12481 
12482    ins_encode %{
12483      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12484             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12485    %}
12486   ins_pipe(ialu_reg_reg_shift);
12487 %}
12488 
12489 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
12490 %{
12491   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12492   ins_cost(1.9 * INSN_COST);
12493   format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
12494 
12495    ins_encode %{
12496      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12497             as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
12498    %}
12499   ins_pipe(ialu_reg_reg_shift);
12500 %}
12501 
12502 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
12503 %{
12504   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
12505   ins_cost(1.9 * INSN_COST);
12506   format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
12507 
12508    ins_encode %{
12509      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12510             as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
12511    %}
12512   ins_pipe(ialu_reg_reg_shift);
12513 %}
12514 
12515 
// Fold "(long)(int)src2 << lshift" into a single add/sub with a
// sign-extended (sxtw), shifted register operand.
// NOTE(review): still inside the auto-generated section -- do not hand-edit.
// The trailing "%};" (with semicolon) is preserved as generated; adlc
// tolerates it -- confirm against the generator before normalizing.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12541 
12542 
// Zero-extension folds: "(src2 & 0xFF/0xFFFF/0xFFFFFFFF) << lshift" becomes a
// single add/sub with a zero-extended (uxtb/uxth/uxtw), shifted register
// operand. Still inside the auto-generated section -- do not hand-edit.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit (int) variants of the masked folds: addw/subw with uxtb/uxth.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12672 // END This section of the file is automatically generated. Do not edit --------------
12673 
12674 // ============================================================================
12675 // Floating Point Arithmetic Instructions
12676 
// Basic scalar FP arithmetic: register-register add/sub/mul in single (s)
// and double (d) precision, routed to the corresponding FP dual-op pipes.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Multiplies are costed slightly higher (6x) than add/sub (5x).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12766 
// Fused multiply-add family, guarded by UseFMA. The FmaF/FmaD ideal node has
// the addend first: (FmaF src3 (Binary src1 src2)) == src1 * src2 + src3.
// Negated operands in the match trees select the msub/nmadd/nmsub encodings.

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
// Two match rules: the negation may sit on either multiplicand.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the `zero` operand is not referenced by the match rule or the
// encoding -- presumably a leftover from an earlier pattern; confirm before
// removing it.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): `zero` is unused here as well -- see mnsubF_reg_reg above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. the assembler entry point is named fnmsub (not fnmsubd);
  // presumably it emits the double-precision FNMSUB form -- confirm
  // against the assembler before renaming.
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12907 
12908 
// FP division; double division is costed higher (32x vs 18x) and each
// precision uses its own divide pipe.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12938 
// Single-precision negate.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format fixed: the encoding emits fnegs (single-precision), not a bare
  // "fneg"; now consistent with negD_reg_reg below.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12952 
// Double-precision negate and single/double absolute value; simple FP
// unary ops on the corresponding unary pipes.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12992 
// Double-precision square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed: this is a double-precision op, so it belongs on the
  // double divide/sqrt pipe. It was fp_div_s, swapped with sqrtF_reg.
  ins_pipe(fp_div_d);
%}
13005 
// Single-precision square root. There is no SqrtF ideal node here; Java's
// (float) Math.sqrt(x) appears as ConvD2F(SqrtD(ConvF2D x)), which this rule
// collapses to a single fsqrts (same rounded result -- standard identity;
// confirm if touching this pattern).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe class fixed: single-precision op on the single divide/sqrt pipe.
  // It was fp_div_d, swapped with sqrtD_reg.
  ins_pipe(fp_div_s);
%}
13018 
13019 // ============================================================================
13020 // Logical Instructions
13021 
13022 // Integer Logical Instructions
13023 
13024 // And Instructions
13025 
13026 
// 32-bit bitwise AND, register-register.
// NOTE(review): the rFlagsReg cr operand is not referenced by the match rule
// or an effect() clause -- presumably vestigial; confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13041 
// 32-bit bitwise AND with a logical immediate (immILog = encodable bitmask).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format fixed: the encoding emits andw, not the flag-setting andsw.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13056 
// Or Instructions

// 32-bit bitwise OR, register-register and register-immediate forms
// (immILog = immediate encodable as an AArch64 logical bitmask).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR, register-register and register-immediate forms.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13120 
13121 // Long Logical Instructions
13122 // TODO
13123 
// 64-bit bitwise AND, register-register.
// NOTE(review): cr is not referenced by the match rule -- presumably
// vestigial; kept to preserve the instruct's operand list.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment fixed: this is the long (64-bit) form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13138 
// 64-bit bitwise AND with a logical immediate (immLLog = encodable bitmask).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment fixed: this is the long (64-bit) form, not int.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13153 
13154 // Or Instructions
13155 
// 64-bit bitwise OR, register-register.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment fixed: this is the long (64-bit) form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13170 
// 64-bit bitwise OR with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment fixed: this is the long (64-bit) form, not int.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13185 
13186 // Xor Instructions
13187 
// 64-bit bitwise XOR, register-register.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format comment fixed: this is the long (64-bit) form, not int.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13202 
// 64-bit bitwise XOR with a logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  // Format comment fixed ("# long", not "# int") and clause order normalized
  // (format before ins_cost) to match the sibling logical instructs.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13217 
// int <-> long and pointer/int -> boolean conversions.

// i2l: sbfm Xd, Xn, #0, #31 is the sxtw encoding (sign-extend low 32 bits).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned i2l: (long)x & 0xFFFFFFFF collapses to a single zero-extend
// (ubfm #0, #31 == uxtw).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// l2i: a 32-bit register move truncates to the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Conv2B on an int: dst = (src != 0) ? 1 : 0, via compare-with-zero + cset.
// Clobbers the flags, hence the KILL cr effect.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Conv2B on a pointer: same cmp/cset idiom with the full-width compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13292 
// Floating-point precision changes and FP <-> integer conversions.
// fcvtzs* = convert to signed integer, round toward zero (Java semantics
// for NaN/overflow are presumably handled elsewhere -- confirm in the
// matcher/stubs before relying on these alone).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// scvtf* = signed integer to floating point.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13422 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret the raw bits (MoveF2I etc.) by reloading the spilled
// value into the other register file.
// NOTE(review): the float/double loads use pipe_class_memory while the
// integer loads use iload_reg_reg -- presumably intentional, but confirm the
// asymmetry against the pipeline definitions.

instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13496 
13497 instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
13498 
13499   match(Set dst (MoveF2I src));
13500 
13501   effect(DEF dst, USE src);
13502 
13503   ins_cost(INSN_COST);
13504 
13505   format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
13506 
13507   ins_encode %{
13508     __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13509   %}
13510 
13511   ins_pipe(pipe_class_memory);
13512 
13513 %}
13514 
13515 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
13516 
13517   match(Set dst (MoveI2F src));
13518 
13519   effect(DEF dst, USE src);
13520 
13521   ins_cost(INSN_COST);
13522 
13523   format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
13524 
13525   ins_encode %{
13526     __ strw($src$$Register, Address(sp, $dst$$disp));
13527   %}
13528 
13529   ins_pipe(istore_reg_reg);
13530 
13531 %}
13532 
// Store a double register into a long stack slot (raw bit move,
// no conversion): str Dn, [sp, #disp].
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fix: the format string previously printed "strd $dst, $src", with
  // the operands reversed relative to the emitted store (src -> dst)
  // and to every sibling Move*_reg_stack rule, which all print
  // "$src, $dst".  Debug/PrintAssembly output now matches the code.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13550 
// Store a long register into a double stack slot (raw bit move).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13568 
// Register-to-register bit moves between the FP and GP files,
// all implemented with fmov (no conversion, no memory traffic).

// float bits -> int register: fmov Wd, Sn.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> float register: fmov Sd, Wn.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> long register: fmov Xd, Dn.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> double register: fmov Dd, Xn.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13640 
13641 // ============================================================================
13642 // clearing of an array
13643 
// Zero an array given a variable word count.  cnt and base are pinned
// to r11/r10 and clobbered (USE_KILL) because zero_words uses them as
// working registers.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Zero an array whose word count is a small compile-time constant.
// Only matches when the count is below BlockZeroingLowLimit (in
// words), where the constant-count form of zero_words is preferable.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13675 
13676 // ============================================================================
13677 // Overflow Math Instructions
13678 
// Overflow check for int add: cmnw computes op1 + op2 and sets the
// flags without writing a result; consumers test the V flag (VS) for
// signed overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add: 64-bit cmn.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate operand.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13730 
// Overflow check for int subtract: cmpw computes op1 - op2 and sets
// the flags; consumers test the V flag (VS) for signed overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract: 64-bit cmp.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above with an immediate; emitted as subs into zr, which is the
// underlying form of the cmp-immediate alias.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13782 
// Overflow check for int negate, matched as (OverflowSubI 0 op1):
// cmpw zr, op1 sets flags for 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negate, matched as (OverflowSubL 0 op1).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13808 
// Overflow check for int multiply.  smull produces the full 64-bit
// product; comparing it against its own sign-extended low 32 bits is
// NE exactly when the product does not fit in an int.  The cselw/cmpw
// tail then converts that NE into the V flag (0x80000000 - 1 sets VS)
// so generic flag consumers can test VS/VC.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused multiply-overflow-check + branch.  Only matches an If testing
// overflow/no_overflow, so it can branch directly on the NE/EQ result
// of the sign-extension compare and skip the VS-conversion tail above.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // Map VS (overflow requested) to NE, VC to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Overflow check for long multiply: mul gives bits 0..63, smulh bits
// 64..127.  The product fits in a long iff the high half equals the
// sign extension of the low half (low half ASR #63).  NE is again
// converted to VS by the cselw/cmpw tail.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused long multiply-overflow-check + branch; same NE/EQ shortcut
// as the int variant above.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13898 
13899 // ============================================================================
13900 // Compare Instructions
13901 
// Signed int compares.  All use the aarch64_enc_cmpw* encoding
// classes defined earlier in this file; the immediate forms are split
// by whether the constant fits an add/sub immediate (single cmpw) or
// needs materialization first (costed at 2 * INSN_COST).

instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an arbitrary int immediate.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13957 
13958 // Unsigned compare Instructions; really, same as signed compare
13959 // except it should only be used to feed an If or a CMovI which takes a
13960 // cmpOpU.
13961 
// Unsigned int compares.  Same cmpw encodings as the signed forms;
// the difference is only that they produce rFlagsRegU, so consumers
// must use the unsigned cmpOpU conditions.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an arbitrary int immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14017 
// Signed long compares: 64-bit cmp via the aarch64_enc_cmp* encoding
// classes, mirroring the int forms above.

instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an arbitrary long immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14073 
// Unsigned long compares: same 64-bit cmp encodings as CmpL, but
// producing rFlagsRegU so consumers use unsigned conditions.

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against the constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Compare against an arbitrary long immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
14129 
// Pointer and compressed-pointer compares.  Pointers are compared
// unsigned (rFlagsRegU).

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare two compressed (narrow) pointers.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null test of a pointer.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Null test of a compressed (narrow) pointer.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
14185 
14186 // FP comparisons
14187 //
14188 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
14189 // using normal cmpOp. See declaration of rFlagsReg for details.
14190 
// Float compare setting the normal flags register (see the FP
// comparison note above): fcmp Sn, Sm.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14204 
// Float compare against literal 0.0, using the fcmp-with-zero form.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Fix: drop the non-standard 'D' literal suffix (0.0D); plain 0.0
    // is already a double and is accepted by all C++ compilers.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14218 // FROM HERE
14219 
// Double compare setting the normal flags register: fcmp Dn, Dm.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
14233 
// Double compare against literal 0.0, using the fcmp-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Fix: drop the non-standard 'D' literal suffix (0.0D); plain 0.0
    // is already a double and is accepted by all C++ compilers.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14247 
// Three-way float compare: dst = -1, 0 or +1 as src1 <, ==, > src2,
// with unordered yielding -1 (csinvw makes 0/-1 on EQ/NE; csnegw
// then flips -1 to +1 unless LT, which also holds for unordered).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fix: the format string was missing the closing parentheses on the
  // csinvw/csnegw lines.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Fix: removed an unused "Label done" that was bound but never
    // branched to.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14275 
// Three-way double compare: dst = -1, 0 or +1 as src1 <, ==, > src2,
// with unordered yielding -1 (same csinvw/csnegw trick as the float
// variant).
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fix: the format string was missing the closing parentheses on the
  // csinvw/csnegw lines.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Fix: removed an unused "Label done" that was bound but never
    // branched to.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14302 
// Three-way float compare against literal 0.0; same result encoding
// as compF3_reg_reg.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fix: the format string was missing the closing parentheses on the
  // csinvw/csnegw lines.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Fixes: removed an unused "Label done" that was bound but never
    // branched to, and dropped the non-standard 'D' suffix on the
    // 0.0 literal.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14329 
// Three-way double compare against literal 0.0; same result encoding
// as compD3_reg_reg.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fix: the format string was missing the closing parentheses on the
  // csinvw/csnegw lines.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Fixes: removed an unused "Label done" that was bound but never
    // branched to, and dropped the non-standard 'D' suffix on the
    // 0.0 literal.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14355 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw produces 0/1 from the
// LT condition, then subw zr - dst turns 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case q == 0: an arithmetic shift right by 31 replicates the
// sign bit, giving -1 for negative src and 0 otherwise in one insn.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14392 
14393 // ============================================================================
14394 // Max and Min
14395 
// Signed int minimum: compare then conditional select of src1 on LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: compare then conditional select of src1 on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14446 
14447 // ============================================================================
14448 // Branch Instructions
14449 
14450 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch: b.cond on signed flag conditions.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned: as above, but taking unsigned
// flags (rFlagsRegU) and unsigned conditions (cmpOpU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14506 
14507 // Make use of CBZ and CBNZ.  These instructions, as well as being
14508 // shorter than (cmp; branch), have the additional benefit of not
14509 // killing the flags.
14510 
14511 instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
14512   match(If cmp (CmpI op1 op2));
14513   effect(USE labl);
14514 
14515   ins_cost(BRANCH_COST);
14516   format %{ "cbw$cmp   $op1, $labl" %}
14517   ins_encode %{
14518     Label* L = $labl$$label;
14519     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14520     if (cond == Assembler::EQ)
14521       __ cbzw($op1$$Register, *L);
14522     else
14523       __ cbnzw($op1$$Register, *L);
14524   %}
14525   ins_pipe(pipe_cmp_branch);
14526 %}
14527 
// Branch on long ==/!= 0 using a single 64-bit cbz/cbnz; flags untouched.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14544 
// Branch on pointer ==/!= NULL using a single 64-bit cbz/cbnz; flags untouched.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14561 
// Branch on narrow oop ==/!= 0 using the 32-bit cbzw/cbnzw; flags untouched.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14578 
// Branch on (DecodeN oop) ==/!= NULL without materializing the decoded
// pointer: test the 32-bit compressed value directly (relies on the narrow
// encoding of NULL being 0). Flags untouched.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  // Print the word-width mnemonic: the encoding emits cbzw/cbnzw, matching
  // the other 32-bit variants above.
  format %{ "cbw$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14595 
// Unsigned int compare against zero folded into cbzw/cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // For an unsigned value x, "x <= 0" (LS) is equivalent to "x == 0",
    // so both EQ and LS branch when the register is zero.
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14612 
// Unsigned long compare against zero folded into 64-bit cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Unsigned "x <= 0" (LS) is the same as "x == 0"; see cmpUI_imm0_branch.
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14629 
14630 // Test bit and Branch
14631 
14632 // Patterns for short (< 32KiB) variants
// Branch on sign of a long: "x < 0" iff bit 63 is set, "x >= 0" iff clear.
// tbr presumably selects tbz/tbnz from the condition — defined earlier in
// this file (not shown); TODO confirm.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> test-bit-not-zero (sign set), GE -> test-bit-zero (sign clear).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14648 
// Branch on sign of an int: tests bit 31 instead of a full compare.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> sign bit set (NE form), GE -> sign bit clear (EQ form).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14664 
// Branch on a single bit of a long: (x & mask) ==/!= 0 where mask is a
// power of two. The predicate digs the AndL constant out of the matched tree.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The tested bit index is log2 of the single-bit mask.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14681 
// Branch on a single bit of an int: (x & mask) ==/!= 0, mask a power of two.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14698 
14699 // And far variants
// Far variant of cmpL_branch_sign: same bit-63 test, but the far flag lets
// the emitter reach targets beyond the tbz/tbnz +/-32KiB range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14714 
// Far variant of cmpI_branch_sign (bit-31 test, far-capable emission).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14729 
// Far variant of cmpL_branch_bit (single-bit long test, far-capable).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14745 
// Far variant of cmpI_branch_bit (single-bit int test, far-capable).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14761 
14762 // Test bits
14763 
// Set flags from (long & imm) vs 0 with a single tst; the predicate admits
// only masks encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14776 
// Set flags from (int & imm) vs 0 with a single tstw; the predicate admits
// only masks encodable as a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Print "tstw": the encoding emits the 32-bit form (matches cmpI_and_reg).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14789 
// Register-register form: flags from (long & long) vs 0 via tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14800 
// Register-register form: flags from (int & int) vs 0 via tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14811 
14812 
14813 // Conditional Far Branch
14814 // Conditional Far Branch Unsigned
14815 // TODO: fixme
14816 
14817 // counted loop end branch near
// Conditional branch closing a counted loop; same emission as branchCon
// but matched against the CountedLoopEnd ideal node.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14833 
14834 // counted loop end branch near Unsigned
// Unsigned-condition variant of branchLoopEnd.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14850 
14851 // counted loop end branch far
14852 // counted loop end branch far unsigned
14853 // TODO: fixme
14854 
14855 // ============================================================================
14856 // inlined locking and unlocking
14857 
// Inline fast-path monitor enter; the flags result tells the caller whether
// the slow path is needed. Encoding class is defined earlier in this file.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14872 
// Inline fast-path monitor exit; counterpart of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14885 
14886 
14887 // ============================================================================
14888 // Safepoint Instructions
14889 
14890 // TODO
14891 // provide a near and far version of this code
14892 
// Safepoint poll: a load from the polling page; the VM arms the page so the
// load faults when a safepoint is requested.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14905 
14906 
14907 // ============================================================================
14908 // Procedure Call/Return Instructions
14909 
14910 // Call Java Static Instruction
14911 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14927 
14928 // TO HERE
14929 
14930 // Call Java Dynamic Instruction
// Call to a dynamically-dispatched Java method (virtual/interface).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14946 
14947 // Call Runtime Instruction
14948 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14963 
14964 // Call Runtime Instruction
14965 
// Leaf runtime call: no safepoint, no Java-visible state transition.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14980 
14981 // Call Runtime Instruction
14982 
// Leaf runtime call that does not use floating point.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14997 
14998 // Tail Call; Jump from runtime stub to Java code.
14999 // Also known as an 'interprocedural jump'.
15000 // Target of jump will eventually return to caller.
15001 // TailJump below removes the return address.
// Indirect tail call (interprocedural jump); the callee eventually returns
// to this frame's caller. method_oop is only USEd, not clobbered.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
15014 
// Tail jump for exception delivery: jumps with the exception oop pinned in r0.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15027 
15028 // Create exception oop: created by stack-crawling runtime code.
15029 // Created exception is now available to this handler, and is setup
15030 // just prior to jumping to this handler. No code emitted.
15031 // TODO check
15032 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the incoming exception oop (already placed in r0 by the runtime) to a
// register-allocator value; emits no code.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15045 
15046 // Rethrow exception: The exception oop will come in the first
15047 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the shared rethrow stub; exception oop arrives in the
// first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
15058 
15059 
15060 // Return Instruction
15061 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so this is a bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
15072 
15073 // Die now.
// Halt: emit a trapping instruction for paths that must never execute.
// dpcs1 presumably encodes the DCPS1 debug-state instruction, whose
// execution raises a fault here — TODO confirm against the assembler.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
15088 
15089 // ============================================================================
15090 // Partial Subtype Check
15091 //
// Search the sub's secondary superklass array for an instance of the superklass.  Set a hidden
15093 // internal cache on a hit (cache is checked with exposed code in
15094 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
15095 // encoding ALSO sets flags.
15096 
// Partial subtype check producing a result register (zero on hit, see the
// opcode flag below); also sets flags as a side effect.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
15111 
// Flags-only variant: matches an explicit comparison of the check result
// against zero, so only cr is produced and result/temp are clobbered.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as the version above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
15126 
// String compare, both strings UTF-16 (UU encoding).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps, matching the UL/LU variants below.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15144 
// String compare, both strings Latin-1 (LL encoding).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps, matching the UL/LU variants below.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15161 
// Mixed-encoding compare (str1 UTF-16, str2 Latin-1); needs vector temps.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15181 
// Mixed-encoding compare (str1 Latin-1, str2 UTF-16); needs vector temps.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15201 
// String indexOf, both strings UTF-16; -1 means the substring length is not
// a compile-time constant.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15222 
// String indexOf, both strings Latin-1; -1 = non-constant substring length.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15243 
// String indexOf, mixed encodings (UL); -1 = non-constant substring length.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15264 
// indexOf with a small constant substring length (UU); the constant count
// lets the stub specialize, so fewer temps are needed and cnt2 is passed
// as the immediate instead of a register (zr).
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
15285 
// indexOf with a small constant substring length (LL); see conUU above.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15306 
// indexOf with constant substring length of exactly 1 (UL); see conUU above.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15327 
// indexOf of a single char in a UTF-16 string.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15345 
// String equality, Latin-1; the trailing 1 is the element size in bytes.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15361 
// String equality, UTF-16; the trailing 2 is the element size in bytes.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15377 
// byte[] equality; the trailing 1 is the element size in bytes.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15394 
// Intrinsic for AryEq with UU encoding (char arrays): same as
// array_equalsB but the stub is called with element size 2.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15411 
// Intrinsic for HasNegatives: scan $len bytes at $ary1 via the
// has_negatives stub; result in R0.  ary1 and len are clobbered.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15422 
15423 // fast char[] to byte[] compression
// Intrinsic for StrCompressedCopy: compress char[] $src into byte[] $dst
// ($len elements) via the char_array_compress stub, using V0..V3 as
// vector temps; src/dst/len are clobbered, result in R0.
// NOTE(review): the format's "KILL R1, R2, R3, R4" does not match the
// operands used here (R0..R3 plus V0..V3) — looks stale, verify.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15441 
15442 // fast byte[] to char[] inflation
// Intrinsic for StrInflatedCopy: inflate byte[] $src into char[] $dst
// ($len elements) via the byte_array_inflate stub.  No value is produced
// (Universe dummy); src/dst/len are clobbered, V0..V2 and R3 are temps.
// NOTE(review): the format comment lists only $tmp1,$tmp2 but $tmp3 and
// $tmp4 are temps as well — verify/refresh.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15456 
15457 // encode char[] to byte[] in ISO_8859_1
// Intrinsic for EncodeISOArray: encode $len chars at $src into bytes at
// $dst via the encode_iso_array stub; result in R0.  src/dst/len and the
// vector temps V0..V3 are clobbered.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15475 
15476 // ============================================================================
15477 // This name is KNOWN by the ADLC and cannot be changed.
15478 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15479 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated thread
// register (thread_RegP), so this instruction is zero-cost and zero-size
// and emits no code at all.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15494 
15495 // ====================VECTOR INSTRUCTIONS=====================================
15496 
15497 // Load vector (32 bits)
// Load a 32-bit vector from memory into $dst with ldrs (S-sized load).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15507 
15508 // Load vector (64 bits)
// Load a 64-bit vector from memory into $dst with ldrd (D-sized load).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15518 
15519 // Load Vector (128 bits)
// Load a 128-bit vector from memory into $dst with ldrq (Q-sized load).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15529 
15530 // Store Vector (32 bits)
// Store a 32-bit vector $src to memory with strs (S-sized store).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15540 
15541 // Store Vector (64 bits)
// Store a 64-bit vector $src to memory with strd (D-sized store).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15551 
15552 // Store Vector (128 bits)
// Store a 128-bit vector $src to memory with strq (Q-sized store).
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15562 
// Broadcast a GP register into the byte lanes of a 64-bit vector (dup
// T8B).  Also covers 4-element vectors (upper lanes unused).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15575 
// Broadcast a GP register into all 16 byte lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15587 
// Broadcast an immediate into the byte lanes of a 64-bit vector; only
// the low 8 bits of the constant are used.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15600 
// Broadcast an immediate (low 8 bits) into all 16 byte lanes.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15612 
// Broadcast a GP register into the 16-bit lanes of a 64-bit vector
// (dup T4H).  Also covers 2-element vectors.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15625 
// Broadcast a GP register into all 8 halfword lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15637 
// Broadcast an immediate (low 16 bits) into the halfword lanes of a
// 64-bit vector.  Also covers 2-element vectors.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15650 
// Broadcast an immediate (low 16 bits) into all 8 halfword lanes.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15662 
// Broadcast a GP register into both 32-bit lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15674 
// Broadcast a GP register into all four 32-bit lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15686 
// Broadcast an immediate into both 32-bit lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15698 
// Broadcast an immediate into all four 32-bit lanes of a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15710 
// Broadcast a 64-bit GP register into both D lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15722 
// Zero a 128-bit vector by eor-ing the register with itself; the zero
// immediate operand is not used by the encoding.  Matches ReplicateI of
// the constant 0 (hence the "vector(4I)" format) in the 2-lane case.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15736 
// Broadcast a float register into both S lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15749 
// Broadcast a float register into all four S lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15762 
// Broadcast a double register into both D lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15775 
15776 // ====================REDUCTION ARITHMETIC====================================
15777 
// Add-reduce a 2-lane int vector into a scalar: extract both S lanes
// with umov and accumulate them together with scalar input $src1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15796 
// Add-reduce a 4-lane int vector: addv sums all lanes into lane 0 of
// $tmp, which is extracted and added to scalar input $src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15814 
// Multiply-reduce a 2-lane int vector: extract each S lane with umov and
// fold it into $dst with mul, starting from scalar input $src1.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15833 
// Multiply-reduce a 4-lane int vector: fold the upper D half onto the
// lower half (ins + mulv T2S), then extract the two partial products and
// multiply them with scalar input $src1.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15858 
// Add-reduce a 2-lane float vector: strictly-ordered scalar fadds chain
// (lane order matters for FP), moving lane 1 into $tmp with ins.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15878 
// Add-reduce a 4-lane float vector: strictly-ordered scalar fadds chain
// over lanes 0..3, each upper lane moved into $tmp with ins first.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15910 
// Multiply-reduce a 2-lane float vector: strictly-ordered scalar fmuls
// chain (lane order matters for FP), moving lane 1 into $tmp with ins.
// Fix: the format's trailing tag previously read "add reduction4f" — a
// copy-paste from reduce_add4F; this is a 2-lane MUL reduction.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15930 
// Multiply-reduce a 4-lane float vector: strictly-ordered scalar fmuls
// chain over lanes 0..3, each upper lane moved into $tmp with ins first.
// Fix: the format's trailing tag previously read "add reduction4f" — a
// copy-paste from reduce_add4F; this is a MUL reduction.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15962 
// Add-reduce a 2-lane double vector: strictly-ordered scalar faddd chain,
// moving lane 1 into $tmp with ins.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15982 
// Multiply-reduce a 2-lane double vector: strictly-ordered scalar fmuld
// chain, moving lane 1 into $tmp with ins.
// Fix: the format's trailing tag previously read "add reduction2d" — a
// copy-paste from reduce_add2D; this is a MUL reduction.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16002 
16003 // ====================VECTOR ARITHMETIC=======================================
16004 
16005 // --------------------------------- ADD --------------------------------------
16006 
// Vector integer add, byte lanes in a 64-bit register (also covers 4B).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16021 
// Vector integer add, 16 byte lanes in a 128-bit register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16035 
// Vector integer add, halfword lanes in a 64-bit register (also 2S).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16050 
// Vector integer add, 8 halfword lanes in a 128-bit register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16064 
// Vector integer add, two 32-bit lanes in a 64-bit register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16078 
// Vector integer add, four 32-bit lanes in a 128-bit register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16092 
// Vector integer add, two 64-bit lanes in a 128-bit register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16106 
// Vector float add, two S lanes in a 64-bit register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
16120 
// Vector float add, four S lanes in a 128-bit register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16134 
// Vector double add, two D lanes in a 128-bit register.  No predicate:
// AddVD on vecX implies the 2-lane form.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16147 
16148 // --------------------------------- SUB --------------------------------------
16149 
// Vector integer subtract, byte lanes in a 64-bit register (also 4B).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16164 
// Vector integer subtract, 16 byte lanes in a 128-bit register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16178 
// Vector integer subtract, halfword lanes in a 64-bit register (also 2S).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16193 
// Vector integer subtract, 8 halfword lanes in a 128-bit register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16207 
// Vector integer subtract, two 32-bit lanes in a 64-bit register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
16221 
// Vector integer subtract, four 32-bit lanes in a 128-bit register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
16235 
16236 instruct vsub2L(vecX dst, vecX src1, vecX src2)
16237 %{
16238   predicate(n->as_Vector()->length() == 2);
16239   match(Set dst (SubVL src1 src2));
16240   ins_cost(INSN_COST);
16241   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
16242   ins_encode %{
16243     __ subv(as_FloatRegister($dst$$reg), __ T2D,
16244             as_FloatRegister($src1$$reg),
16245             as_FloatRegister($src2$$reg));
16246   %}
16247   ins_pipe(vdop128);
16248 %}
16249 
16250 instruct vsub2F(vecD dst, vecD src1, vecD src2)
16251 %{
16252   predicate(n->as_Vector()->length() == 2);
16253   match(Set dst (SubVF src1 src2));
16254   ins_cost(INSN_COST);
16255   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
16256   ins_encode %{
16257     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
16258             as_FloatRegister($src1$$reg),
16259             as_FloatRegister($src2$$reg));
16260   %}
16261   ins_pipe(vdop_fp64);
16262 %}
16263 
16264 instruct vsub4F(vecX dst, vecX src1, vecX src2)
16265 %{
16266   predicate(n->as_Vector()->length() == 4);
16267   match(Set dst (SubVF src1 src2));
16268   ins_cost(INSN_COST);
16269   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
16270   ins_encode %{
16271     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
16272             as_FloatRegister($src1$$reg),
16273             as_FloatRegister($src2$$reg));
16274   %}
16275   ins_pipe(vdop_fp128);
16276 %}
16277 
16278 instruct vsub2D(vecX dst, vecX src1, vecX src2)
16279 %{
16280   predicate(n->as_Vector()->length() == 2);
16281   match(Set dst (SubVD src1 src2));
16282   ins_cost(INSN_COST);
16283   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
16284   ins_encode %{
16285     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
16286             as_FloatRegister($src1$$reg),
16287             as_FloatRegister($src2$$reg));
16288   %}
16289   ins_pipe(vdop_fp128);
16290 %}
16291 
16292 // --------------------------------- MUL --------------------------------------
16293 
// Vector multiply, short lanes (64-bit). Also matches 2-lane short
// vectors, which are encoded with the same 4H arrangement.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 8 x short lanes (128-bit).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 x int lanes (64-bit).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 4 x int lanes (128-bit).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 x float lanes (64-bit).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector multiply, 4 x float lanes (128-bit).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector multiply, 2 x double lanes (128-bit).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16392 
16393 // --------------------------------- MLA --------------------------------------
16394 
// Integer multiply-accumulate: dst += src1 * src2 (mla).
// Short lanes, 64-bit; also matches 2-lane short vectors (4H form).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 8 x short lanes (128-bit).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, 2 x int lanes (64-bit).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 4 x int lanes (128-bit).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Fused FP multiply-accumulate: dst + src1 * src2 (fmla).
// Only selected when UseFMA is on (FmaVF is only generated then).
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2 (fused), 4 x float lanes (128-bit).
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2 (fused), 2 x double lanes (128-bit).
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16493 
16494 // --------------------------------- MLS --------------------------------------
16495 
// Integer multiply-subtract: dst -= src1 * src2 (mls).
// Short lanes, 64-bit; also matches 2-lane short vectors (4H form).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 8 x short lanes (128-bit).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, 2 x int lanes (64-bit).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 4 x int lanes (128-bit).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Fused FP multiply-subtract: dst - src1 * src2 (fmls). Either
// operand may carry the negation in the ideal graph; both forms
// reduce to the same fmls encoding.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2 (fused), 4 x float lanes (128-bit).
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2 (fused), 2 x double lanes (128-bit).
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16597 
16598 // --------------------------------- DIV --------------------------------------
16599 
// Vector FP divide, 2 x float lanes (64-bit).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector FP divide, 4 x float lanes (128-bit).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector FP divide, 2 x double lanes (128-bit).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16641 
16642 // --------------------------------- SQRT -------------------------------------
16643 
// Vector FP square root, 2 x double lanes (128-bit).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16655 
16656 // --------------------------------- ABS --------------------------------------
16657 
// Vector FP absolute value, 2 x float lanes (64-bit).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector FP absolute value, 4 x float lanes (128-bit).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Vector FP absolute value, 2 x double lanes (128-bit).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16696 
16697 // --------------------------------- NEG --------------------------------------
16698 
// Vector FP negate, 2 x float lanes (64-bit).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector FP negate, 4 x float lanes (128-bit).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Vector FP negate, 2 x double lanes (128-bit).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16737 
16738 // --------------------------------- AND --------------------------------------
16739 
// Bitwise AND, 64-bit vectors. Matched on total byte length, so it
// covers both 4- and 8-byte vectors regardless of element type.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND, 128-bit vectors.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16768 
16769 // --------------------------------- OR ---------------------------------------
16770 
// Bitwise OR, 64-bit vectors. Matched on total byte length, so it
// covers both 4- and 8-byte vectors regardless of element type.
// Note: the format string previously said "and" (copy-paste from
// vand8B); it now correctly prints "orr", matching the emitted
// instruction and the vor16B rule below.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16785 
// Bitwise OR, 128-bit vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16799 
16800 // --------------------------------- XOR --------------------------------------
16801 
// Bitwise XOR, 64-bit vectors (AArch64 mnemonic is "eor"). Matched
// on total byte length, covering 4- and 8-byte vectors.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR, 128-bit vectors.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16830 
16831 // ------------------------------ Shift ---------------------------------------
16832 
// Materialize a left-shift count: broadcast the GP register count
// into every byte lane of a vector register.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount,
// so a right-shift count is broadcast and then negated per lane.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16852 
// Byte-lane variable shifts. sshl handles both left and arithmetic
// right shifts: the shift-count vector is negated for right shifts
// (see vshiftcntR above), and sshl shifts left by a negative amount.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left/arithmetic-right shift, 16 x byte lanes (128-bit).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical-right shift (ushl with negated count), 8B.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical-right shift (ushl with negated count), 16B.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16908 
// Immediate left shift, byte lanes (8B). Java shifts by >= the
// element width; hardware shl immediates max out at width-1, so a
// count >= 8 must produce zero — done here with eor(src, src).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift by >= element width: result is all zeroes.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift, byte lanes (16B); see vsll8B_imm for the
// >= 8 zeroing rationale.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift, byte lanes (8B). A count >= 8 is
// clamped to 7: shifting by width-1 replicates the sign bit, which
// is the correct result for an over-wide arithmetic shift.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift, byte lanes (16B); count clamped
// to 7 as in vsra8B_imm.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift, byte lanes (8B). A count >= 8
// shifts in only zeroes, so the result is zeroed via eor(src, src).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift, byte lanes (16B); see vsrl8B_imm.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17015 
// Short-lane variable shifts (4H form also covers 2-lane vectors).
// sshl handles both left and arithmetic right shifts via a negated
// count vector (see vshiftcntR).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left/arithmetic-right shift, 8 x short lanes (128-bit).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical-right shift (ushl with negated count), 4H.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical-right shift (ushl with negated count), 8H.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17071 
17072 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
17073   predicate(n->as_Vector()->length() == 2 ||
17074             n->as_Vector()->length() == 4);
17075   match(Set dst (LShiftVS src shift));
17076   ins_cost(INSN_COST);
17077   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
17078   ins_encode %{
17079     int sh = (int)$shift$$constant;
17080     if (sh >= 16) {
17081       __ eor(as_FloatRegister($dst$$reg), __ T8B,
17082              as_FloatRegister($src$$reg),
17083              as_FloatRegister($src$$reg));
17084     } else {
17085       __ shl(as_FloatRegister($dst$$reg), __ T4H,
17086              as_FloatRegister($src$$reg), sh);
17087     }
17088   %}
17089   ins_pipe(vshift64_imm);
17090 %}
17091 
17092 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
17093   predicate(n->as_Vector()->length() == 8);
17094   match(Set dst (LShiftVS src shift));
17095   ins_cost(INSN_COST);
17096   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
17097   ins_encode %{
17098     int sh = (int)$shift$$constant;
17099     if (sh >= 16) {
17100       __ eor(as_FloatRegister($dst$$reg), __ T16B,
17101              as_FloatRegister($src$$reg),
17102              as_FloatRegister($src$$reg));
17103     } else {
17104       __ shl(as_FloatRegister($dst$$reg), __ T8H,
17105              as_FloatRegister($src$$reg), sh);
17106     }
17107   %}
17108   ins_pipe(vshift128_imm);
17109 %}
17110 
// Vector arithmetic shift right by immediate, packed shorts, 64-bit vector
// (2H or 4H lanes). An arithmetic right shift >= 16 is clamped to 15: for a
// 16-bit lane, shifting by 15 already fills the lane with the sign bit, so
// the result is the same as any larger count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp to 15: result is already pure sign-fill at that count.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
17125 
// Vector arithmetic shift right by immediate, packed shorts, 128-bit vector
// (8H lanes). Counts >= 16 are clamped to 15, which already yields pure
// sign-fill for a 16-bit lane.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp to 15: result is already pure sign-fill at that count.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
17139 
// Vector logical shift right by immediate, packed shorts, 64-bit vector
// (2H or 4H lanes). Unlike the arithmetic form, a logical shift >= 16
// zeros the lane, so that case is emitted as eor(dst, src, src).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Entire 16-bit lane shifted out: produce zero via xor with self.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17159 
// Vector logical shift right by immediate, packed shorts, 128-bit vector
// (8H lanes). A count >= 16 zeros every lane, emitted as eor(dst, src, src).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Entire 16-bit lane shifted out: produce zero via xor with self.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17178 
// Vector shift by register, packed ints, 64-bit vector (2S lanes).
// One pattern handles both LShiftVI and RShiftVI: AArch64 SSHL shifts each
// lane left for a positive count and right (arithmetic) for a negative one,
// and the shift operand is prepared accordingly by the shift-count node.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17192 
// Vector shift by register, packed ints, 128-bit vector (4S lanes).
// Covers both LShiftVI and RShiftVI via SSHL's sign-of-count semantics
// (positive count shifts left, negative shifts right arithmetically).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17206 
// Vector logical (unsigned) right shift by register, packed ints, 64-bit
// vector (2S lanes). Uses USHL with negated counts in the shift operand:
// a negative per-lane count makes USHL shift right logically.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17219 
// Vector logical (unsigned) right shift by register, packed ints, 128-bit
// vector (4S lanes). USHL with a negative per-lane count shifts right.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17232 
// Vector shift left by immediate, packed ints, 64-bit vector (2S lanes).
// No out-of-range guard is needed here: Java int shifts are masked to
// 0..31, which fits the SHL immediate range for 32-bit lanes.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17245 
// Vector shift left by immediate, packed ints, 128-bit vector (4S lanes).
// Java int shift counts are masked to 0..31, so the immediate is always
// encodable for 32-bit lanes.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17258 
// Vector arithmetic shift right by immediate, packed ints, 64-bit vector
// (2S lanes). Count is passed through unchanged; Java masks int shift
// counts to 0..31, so no clamping is required for 32-bit lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17271 
// Vector arithmetic shift right by immediate, packed ints, 128-bit vector
// (4S lanes). Java-masked counts (0..31) always fit the SSHR encoding.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17284 
// Vector logical shift right by immediate, packed ints, 64-bit vector
// (2S lanes). Java-masked counts (0..31) always fit the USHR encoding.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17297 
// Vector logical shift right by immediate, packed ints, 128-bit vector
// (4S lanes). Java-masked counts (0..31) always fit the USHR encoding.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17310 
// Vector shift by register, packed longs, 128-bit vector (2D lanes).
// One pattern covers both LShiftVL and RShiftVL: SSHL shifts left for a
// positive per-lane count and right (arithmetic) for a negative one.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17324 
// Vector logical (unsigned) right shift by register, packed longs, 128-bit
// vector (2D lanes). USHL with a negative per-lane count shifts right.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17337 
// Vector shift left by immediate, packed longs, 128-bit vector (2D lanes).
// Java long shift counts are masked to 0..63, which fits the SHL
// immediate range for 64-bit lanes, so no guard is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17350 
// Vector arithmetic shift right by immediate, packed longs, 128-bit vector
// (2D lanes). Java-masked counts (0..63) always fit the SSHR encoding.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17363 
// Vector logical shift right by immediate, packed longs, 128-bit vector
// (2D lanes). Java-masked counts (0..63) always fit the USHR encoding.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17376 
17377 //----------PEEPHOLE RULES-----------------------------------------------------
17378 // These must follow all instruction definitions as they use the names
17379 // defined in the instructions definitions.
17380 //
17381 // peepmatch ( root_instr_name [preceding_instruction]* );
17382 //
17383 // peepconstraint %{
17384 // (instruction_number.operand_name relational_op instruction_number.operand_name
17385 //  [, ...] );
17386 // // instruction numbers are zero-based using left to right order in peepmatch
17387 //
17388 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17389 // // provide an instruction_number.operand_name for each operand that appears
17390 // // in the replacement instruction's match rule
17391 //
17392 // ---------VM FLAGS---------------------------------------------------------
17393 //
17394 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17395 //
17396 // Each peephole rule is given an identifying number starting with zero and
17397 // increasing by one in the order seen by the parser.  An individual peephole
17398 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17399 // on the command-line.
17400 //
17401 // ---------CURRENT LIMITATIONS----------------------------------------------
17402 //
17403 // Only match adjacent instructions in same basic block
17404 // Only equality constraints
17405 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17406 // Only one replacement instruction
17407 //
17408 // ---------EXAMPLE----------------------------------------------------------
17409 //
17410 // // pertinent parts of existing instructions in architecture description
17411 // instruct movI(iRegINoSp dst, iRegI src)
17412 // %{
17413 //   match(Set dst (CopyI src));
17414 // %}
17415 //
17416 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17417 // %{
17418 //   match(Set dst (AddI dst src));
17419 //   effect(KILL cr);
17420 // %}
17421 //
17422 // // Change (inc mov) to lea
17423 // peephole %{
//   // increment preceded by register-register move
17425 //   peepmatch ( incI_iReg movI );
17426 //   // require that the destination register of the increment
17427 //   // match the destination register of the move
17428 //   peepconstraint ( 0.dst == 1.dst );
17429 //   // construct a replacement instruction that sets
17430 //   // the destination to ( move's source register + one )
17431 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17432 // %}
17433 //
17434 
17435 // Implementation no longer uses movX instructions since
17436 // machine-independent system no longer uses CopyX nodes.
17437 //
17438 // peephole
17439 // %{
17440 //   peepmatch (incI_iReg movI);
17441 //   peepconstraint (0.dst == 1.dst);
17442 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17443 // %}
17444 
17445 // peephole
17446 // %{
17447 //   peepmatch (decI_iReg movI);
17448 //   peepconstraint (0.dst == 1.dst);
17449 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17450 // %}
17451 
17452 // peephole
17453 // %{
17454 //   peepmatch (addI_iReg_imm movI);
17455 //   peepconstraint (0.dst == 1.dst);
17456 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17457 // %}
17458 
17459 // peephole
17460 // %{
17461 //   peepmatch (incL_iReg movL);
17462 //   peepconstraint (0.dst == 1.dst);
17463 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17464 // %}
17465 
17466 // peephole
17467 // %{
17468 //   peepmatch (decL_iReg movL);
17469 //   peepconstraint (0.dst == 1.dst);
17470 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17471 // %}
17472 
17473 // peephole
17474 // %{
17475 //   peepmatch (addL_iReg_imm movL);
17476 //   peepconstraint (0.dst == 1.dst);
17477 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17478 // %}
17479 
17480 // peephole
17481 // %{
17482 //   peepmatch (addP_iReg_imm movP);
17483 //   peepconstraint (0.dst == 1.dst);
17484 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17485 // %}
17486 
17487 // // Change load of spilled value to only a spill
17488 // instruct storeI(memory mem, iRegI src)
17489 // %{
17490 //   match(Set mem (StoreI mem src));
17491 // %}
17492 //
17493 // instruct loadI(iRegINoSp dst, memory mem)
17494 // %{
17495 //   match(Set dst (LoadI mem));
17496 // %}
17497 //
17498 
17499 //----------SMARTSPILL RULES---------------------------------------------------
17500 // These must follow all instruction definitions as they use the names
17501 // defined in the instructions definitions.
17502 
17503 // Local Variables:
17504 // mode: c++
17505 // End: