1 //
   2 // Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// r0-r18 are caller-save (SOC) in both the Java and the C columns.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined here; they are kept invisible
// to the allocator for use as scratch registers (see note above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are SOC for Java use (see the note above about avoiding Java
// callee saves) but callee-save (SOE) under the C calling convention.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are reserved for VM/system use and are never allocated
// (NS in the Java column).
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always treated as save-on-call
// (even though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register vN is modelled as four 32-bit ADLC slots:
  // VN (bits 0-31), VN_H (32-63), VN_J (64-95) and VN_K (96-127),
  // covering the low 128 bits of the register. All are SOC in both
  // columns (see the Java-usage note above).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as an
// instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order. Order encodes priority (see note
// above): scratch/volatile registers first, then argument registers,
// then the callee-saved set, and finally the non-allocatable system
// registers.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// FP/SIMD register allocation order: v16-v31 (no ABI save obligations)
// first, then the argument registers v0-v7, then v8-v15 which the
// platform ABI treats as callee save.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
reg_class any_reg32(
    // r0-r30; R31 (sp) is deliberately omitted, per the comment above
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// The following singleton classes each contain exactly one register,
// for operands that must be bound to that specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP -- unlike
// any_reg32, this class lists R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers, excluding the frame
// pointer R29 (variant selected by the reg_class_dynamic below when
// PreserveFramePointer is set)
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// As above but with R29 (fp) included, for use when the frame pointer
// is not being preserved (see the reg_class_dynamic below)
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers, excluding the frame
// pointer (64-bit counterpart of no_special_reg32_no_fp)
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// As above but with R29 (fp) included, for use when the frame pointer
// is not being preserved (see the reg_class_dynamic below)
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Fixed 64-bit register classes: each pins an operand to one specific
// register pair (lower half + virtual upper half).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (rmethod == r12, matching the "rmethod"
// annotations in the classes above)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers -- includes the special registers
// r27-r31, unlike no_special_ptr_reg below
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers: as ptr_reg but with the
// reserved registers r27-r31 excluded
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers -- only the first 32-bit slot of each
// vN is listed, since a single-precision float occupies one slot
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: each entry is the Vn/Vn_H pair
// covering the low 64 bits of the register
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers (two 32-bit slots per register,
// same membership as double_reg)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// Each register contributes four allocator slots (Vn, Vn_H, Vn_J,
// Vn_K) to cover the full 128-bit width.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only two slots (V0, V0_H) are listed here, unlike
// vectorx_reg which names four slots per register -- confirm the
// _J/_K halves are deliberately omitted for these singleton classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a plain instruction; a volatile
  // memory reference costs ten times a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
// Platform hooks queried by the shared compiler code; AArch64 emits
// no call trampoline stubs, so both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
// Emitters and size estimates for the exception and deoptimization
// handler stubs (the emit_* bodies are defined later in this file).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): reserves 4 instruction words -- confirm this
    // covers the worst-case adr + far-branch sequence emitted by
    // emit_deopt_handler
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
 bool is_CAS(int opcode, bool maybe_volatile);
 // (true when opcode is one of the CompareAndSwapX/GetAndX node
 // flavours; defined in the source block below)

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
  // Optimization of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
1134   // sequences which i) occur as a translation of a volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
1231   // plant at a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
1262   // is_CAS(int opcode, bool maybe_volatile)
1263   //
1264   // return true if opcode is one of the possible CompareAndSwapX
1265   // values otherwise false.
1266 
1267   bool is_CAS(int opcode, bool maybe_volatile)
1268   {
1269     switch(opcode) {
1270       // We handle these
1271     case Op_CompareAndSwapI:
1272     case Op_CompareAndSwapL:
1273     case Op_CompareAndSwapP:
1274     case Op_CompareAndSwapN:
1275     case Op_ShenandoahCompareAndSwapP:
1276     case Op_ShenandoahCompareAndSwapN:
1277     case Op_CompareAndSwapB:
1278     case Op_CompareAndSwapS:
1279     case Op_GetAndSetI:
1280     case Op_GetAndSetL:
1281     case Op_GetAndSetP:
1282     case Op_GetAndSetN:
1283     case Op_GetAndAddI:
1284     case Op_GetAndAddL:
1285       return true;
1286     case Op_CompareAndExchangeI:
1287     case Op_CompareAndExchangeN:
1288     case Op_CompareAndExchangeB:
1289     case Op_CompareAndExchangeS:
1290     case Op_CompareAndExchangeL:
1291     case Op_CompareAndExchangeP:
1292     case Op_WeakCompareAndSwapB:
1293     case Op_WeakCompareAndSwapS:
1294     case Op_WeakCompareAndSwapI:
1295     case Op_WeakCompareAndSwapL:
1296     case Op_WeakCompareAndSwapP:
1297     case Op_WeakCompareAndSwapN:
1298     case Op_ShenandoahWeakCompareAndSwapP:
1299     case Op_ShenandoahWeakCompareAndSwapN:
1300     case Op_ShenandoahCompareAndExchangeP:
1301     case Op_ShenandoahCompareAndExchangeN:
1302       return maybe_volatile;
1303     default:
1304       return false;
1305     }
1306   }
1307 
1308   // helper to determine the maximum number of Phi nodes we may need to
1309   // traverse when searching from a card mark membar for the merge mem
1310   // feeding a trailing membar or vice versa
1311 
1312 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1313 
1314 bool unnecessary_acquire(const Node *barrier)
1315 {
1316   assert(barrier->is_MemBar(), "expecting a membar");
1317 
1318   if (UseBarriersForVolatile) {
1319     // we need to plant a dmb
1320     return false;
1321   }
1322 
1323   MemBarNode* mb = barrier->as_MemBar();
1324 
1325   if (mb->trailing_load()) {
1326     return true;
1327   }
1328 
1329   if (mb->trailing_load_store()) {
1330     Node* load_store = mb->in(MemBarNode::Precedent);
1331     assert(load_store->is_LoadStore(), "unexpected graph shape");
1332     return is_CAS(load_store->Opcode(), true);
1333   }
1334 
1335   return false;
1336 }
1337 
1338 bool needs_acquiring_load(const Node *n)
1339 {
1340   assert(n->is_Load(), "expecting a load");
1341   if (UseBarriersForVolatile) {
1342     // we use a normal load and a dmb
1343     return false;
1344   }
1345 
1346   LoadNode *ld = n->as_Load();
1347 
1348   return ld->is_acquire();
1349 }
1350 
1351 bool unnecessary_release(const Node *n)
1352 {
1353   assert((n->is_MemBar() &&
1354           n->Opcode() == Op_MemBarRelease),
1355          "expecting a release membar");
1356 
1357   if (UseBarriersForVolatile) {
1358     // we need to plant a dmb
1359     return false;
1360   }
1361 
1362   MemBarNode *barrier = n->as_MemBar();
1363   if (!barrier->leading()) {
1364     return false;
1365   } else {
1366     Node* trailing = barrier->trailing_membar();
1367     MemBarNode* trailing_mb = trailing->as_MemBar();
1368     assert(trailing_mb->trailing(), "Not a trailing membar?");
1369     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1370 
1371     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1372     if (mem->is_Store()) {
1373       assert(mem->as_Store()->is_release(), "");
1374       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1375       return true;
1376     } else {
1377       assert(mem->is_LoadStore(), "");
1378       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1379       return is_CAS(mem->Opcode(), true);
1380     }
1381   }
1382   return false;
1383 }
1384 
1385 bool unnecessary_volatile(const Node *n)
1386 {
1387   // assert n->is_MemBar();
1388   if (UseBarriersForVolatile) {
1389     // we need to plant a dmb
1390     return false;
1391   }
1392 
1393   MemBarNode *mbvol = n->as_MemBar();
1394 
1395   bool release = mbvol->trailing_store();
1396   assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
1397 #ifdef ASSERT
1398   if (release) {
1399     Node* leading = mbvol->leading_membar();
1400     assert(leading->Opcode() == Op_MemBarRelease, "");
1401     assert(leading->as_MemBar()->leading_store(), "");
1402     assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
1403   }
1404 #endif
1405 
1406   return release;
1407 }
1408 
1409 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1410 
1411 bool needs_releasing_store(const Node *n)
1412 {
1413   // assert n->is_Store();
1414   if (UseBarriersForVolatile) {
1415     // we use a normal store and dmb combination
1416     return false;
1417   }
1418 
1419   StoreNode *st = n->as_Store();
1420 
1421   return st->trailing_membar() != NULL;
1422 }
1423 
1424 // predicate controlling translation of CAS
1425 //
1426 // returns true if CAS needs to use an acquiring load otherwise false
1427 
1428 bool needs_acquiring_load_exclusive(const Node *n)
1429 {
1430   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
1431   if (UseBarriersForVolatile) {
1432     return false;
1433   }
1434 
1435   LoadStoreNode* ldst = n->as_LoadStore();
1436   if (is_CAS(n->Opcode(), false)) {
1437     assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1438   } else {
1439     return ldst->trailing_membar() != NULL;
1440   }
1441 
1442   // so we can just return true here
1443   return true;
1444 }
1445 
1446 // predicate controlling translation of StoreCM
1447 //
1448 // returns true if a StoreStore must precede the card write otherwise
1449 // false
1450 
1451 bool unnecessary_storestore(const Node *storecm)
1452 {
1453   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1454 
1455   // we need to generate a dmb ishst between an object put and the
1456   // associated card mark when we are using CMS without conditional
1457   // card marking
1458 
1459   if (UseConcMarkSweepGC && !UseCondCardMark) {
1460     return false;
1461   }
1462 
1463   // a storestore is unnecesary in all other cases
1464 
1465   return true;
1466 }
1467 
1468 
1469 #define __ _masm.
1470 
1471 // advance declarations for helper functions to convert register
1472 // indices to register objects
1473 
1474 // the ad file has to provide implementations of certain methods
1475 // expected by the generic code
1476 //
1477 // REQUIRED FUNCTIONALITY
1478 
1479 //=============================================================================
1480 
1481 // !!!!! Special hack to get all types of calls to specify the byte offset
1482 //       from the start of the call to the point where the return address
1483 //       will point.
1484 
1485 int MachCallStaticJavaNode::ret_addr_offset()
1486 {
1487   // call should be a simple bl
1488   int off = 4;
1489   return off;
1490 }
1491 
1492 int MachCallDynamicJavaNode::ret_addr_offset()
1493 {
1494   return 16; // movz, movk, movk, bl
1495 }
1496 
1497 int MachCallRuntimeNode::ret_addr_offset() {
1498   // for generated stubs the call will be
1499   //   far_call(addr)
1500   // for real runtime callouts it will be six instructions
1501   // see aarch64_enc_java_to_runtime
1502   //   adr(rscratch2, retaddr)
1503   //   lea(rscratch1, RuntimeAddress(addr)
1504   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1505   //   blrt rscratch1
1506   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1507   if (cb) {
1508     return MacroAssembler::far_branch_size();
1509   } else {
1510     return 6 * NativeInstruction::instruction_size;
1511   }
1512 }
1513 
1514 // Indicate if the safepoint node needs the polling page as an input
1515 
1516 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
1518 // instruction itself. so we cannot plant a mov of the safepoint poll
1519 // address followed by a load. setting this to true means the mov is
1520 // scheduled as a prior instruction. that's better for scheduling
1521 // anyway.
1522 
bool SafePointNode::needs_polling_address_input()
{
  // the poll address is materialized by a separate mov which must be
  // an input to (and scheduled before) the safepoint load itself
  return true;
}
1527 
1528 //=============================================================================
1529 
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// a breakpoint is emitted as a single brk #0 instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

// size is computed generically from the emitted code
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1544 
1545 //=============================================================================
1546 
#ifndef PRODUCT
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // exact size: one instruction word per nop
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1563 
1564 //=============================================================================
// the constant table base needs no register on this platform
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// never called: requires_postalloc_expand() returns false above
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// the node produces no code of its own
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1589 
#ifndef PRODUCT
// print an assembly-style rendition of the prolog, mirroring the
// code layout produced by MachPrologNode::emit below
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: single sub immediate then store pair
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: pre-indexed store pair, then subtract the
    // remainder via a scratch register
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1611 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, optional simulator notification, and constant-table setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack if the frame is large enough to need the check
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1647 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// the prolog itself contains no relocatable values
int MachPrologNode::reloc() const
{
  return 0;
}
1658 
1659 //=============================================================================
1660 
#ifndef PRODUCT
// print an assembly-style rendition of the epilog, mirroring the
// code produced by MachEpilogNode::emit below
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // nothing beyond the saved lr/rfp pair
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: load pair at an immediate offset, then add
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: restore via a scratch register
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1686 
// emit the method epilog: tear down the frame, optional simulator
// notification, reserved-stack check, and the return safepoint poll
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // touch the polling page on method return so the VM can trap here
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1706 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// use the default pipeline class for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1728 
1729 //=============================================================================
1730 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// map a register-allocator slot number to its register class by
// slot-range comparison: [0,60) int, [60,188) float, then flags/stack
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots each (Vn, Vn_H, Vn_J, Vn_K,
  // cf. vectorx_reg above), i.e. 128 mask slots in total
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1758 
1759 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1760   Compile* C = ra_->C;
1761 
1762   // Get registers to move.
1763   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1764   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1765   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1766   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1767 
1768   enum RC src_hi_rc = rc_class(src_hi);
1769   enum RC src_lo_rc = rc_class(src_lo);
1770   enum RC dst_hi_rc = rc_class(dst_hi);
1771   enum RC dst_lo_rc = rc_class(dst_lo);
1772 
1773   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1774 
1775   if (src_hi != OptoReg::Bad) {
1776     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1777            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1778            "expected aligned-adjacent pairs");
1779   }
1780 
1781   if (src_lo == dst_lo && src_hi == dst_hi) {
1782     return 0;            // Self copy, no move.
1783   }
1784 
1785   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1786               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1787   int src_offset = ra_->reg2offset(src_lo);
1788   int dst_offset = ra_->reg2offset(dst_lo);
1789 
1790   if (bottom_type()->isa_vect() != NULL) {
1791     uint ireg = ideal_reg();
1792     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1793     if (cbuf) {
1794       MacroAssembler _masm(cbuf);
1795       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1796       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1797         // stack->stack
1798         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1799         if (ireg == Op_VecD) {
1800           __ unspill(rscratch1, true, src_offset);
1801           __ spill(rscratch1, true, dst_offset);
1802         } else {
1803           __ spill_copy128(src_offset, dst_offset);
1804         }
1805       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1806         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1807                ireg == Op_VecD ? __ T8B : __ T16B,
1808                as_FloatRegister(Matcher::_regEncode[src_lo]));
1809       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1810         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1811                        ireg == Op_VecD ? __ D : __ Q,
1812                        ra_->reg2offset(dst_lo));
1813       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1814         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1815                        ireg == Op_VecD ? __ D : __ Q,
1816                        ra_->reg2offset(src_lo));
1817       } else {
1818         ShouldNotReachHere();
1819       }
1820     }
1821   } else if (cbuf) {
1822     MacroAssembler _masm(cbuf);
1823     switch (src_lo_rc) {
1824     case rc_int:
1825       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1826         if (is64) {
1827             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1828                    as_Register(Matcher::_regEncode[src_lo]));
1829         } else {
1830             MacroAssembler _masm(cbuf);
1831             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1832                     as_Register(Matcher::_regEncode[src_lo]));
1833         }
1834       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1835         if (is64) {
1836             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1837                      as_Register(Matcher::_regEncode[src_lo]));
1838         } else {
1839             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1840                      as_Register(Matcher::_regEncode[src_lo]));
1841         }
1842       } else {                    // gpr --> stack spill
1843         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1844         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1845       }
1846       break;
1847     case rc_float:
1848       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1849         if (is64) {
1850             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1851                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1852         } else {
1853             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1854                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1855         }
1856       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1857           if (cbuf) {
1858             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1859                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1860         } else {
1861             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1862                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1863         }
1864       } else {                    // fpr --> stack spill
1865         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1866         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1867                  is64 ? __ D : __ S, dst_offset);
1868       }
1869       break;
1870     case rc_stack:
1871       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1872         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1873       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1874         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1875                    is64 ? __ D : __ S, src_offset);
1876       } else {                    // stack --> stack copy
1877         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1878         __ unspill(rscratch1, is64, src_offset);
1879         __ spill(rscratch1, is64, dst_offset);
1880       }
1881       break;
1882     default:
1883       assert(false, "bad rc_class for spill");
1884       ShouldNotReachHere();
1885     }
1886   }
1887 
1888   if (st) {
1889     st->print("spill ");
1890     if (src_lo_rc == rc_stack) {
1891       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1892     } else {
1893       st->print("%s -> ", Matcher::regName[src_lo]);
1894     }
1895     if (dst_lo_rc == rc_stack) {
1896       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1897     } else {
1898       st->print("%s", Matcher::regName[dst_lo]);
1899     }
1900     if (bottom_type()->isa_vect() != NULL) {
1901       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1902     } else {
1903       st->print("\t# spill size = %d", is64 ? 64:32);
1904     }
1905   }
1906 
1907   return 0;
1908 
1909 }
1910 
1911 #ifndef PRODUCT
1912 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1913   if (!ra_)
1914     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1915   else
1916     implementation(NULL, ra_, false, st);
1917 }
1918 #endif
1919 
// Emit the spill/copy instructions into the code buffer; no text output.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1923 
// Size in bytes of the emitted copy, computed by the shared MachNode logic.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1927 
1928 //=============================================================================
1929 
1930 #ifndef PRODUCT
1931 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1932   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1933   int reg = ra_->get_reg_first(this);
1934   st->print("add %s, rsp, #%d]\t# box lock",
1935             Matcher::regName[reg], offset);
1936 }
1937 #endif
1938 
// Materialize the address of the box-lock stack slot: dst = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // Box-lock offsets are expected to always fit an add-immediate.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
1951 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;  // a single 4-byte instruction (the add emitted above)
}
1956 
1957 //=============================================================================
1958 
1959 #ifndef PRODUCT
// Describe the unverified entry point; mirrors MachUEPNode::emit below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
1974 #endif
1975 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the klass of the receiver in j_rarg0 against rscratch2
  // (presumably the inline-cache klass; rscratch1 is scratch — confirm
  // against cmp_klass).  On mismatch, jump to the IC miss stub.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1989 
// Size of the unverified entry point, computed by the shared MachNode logic.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1994 
1995 // REQUIRED EMIT CODE
1996 
1997 //=============================================================================
1998 
1999 // Emit exception handler code.
// Returns the offset of the handler within the stub section,
// or 0 when the code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2018 
2019 // Emit deopt handler code.
// Returns the offset of the handler within the stub section,
// or 0 when the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to this pc before jumping to the unpack blob
  // (NOTE(review): presumably the blob reads the deopt pc from lr — confirm).
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2039 
2040 // REQUIRED MATCHER CODE
2041 
2042 //=============================================================================
2043 
2044 const bool Matcher::match_rule_supported(int opcode) {
2045 
2046   switch (opcode) {
2047   default:
2048     break;
2049   }
2050 
2051   if (!has_match_rule(opcode)) {
2052     return false;
2053   }
2054 
2055   return true;  // Per default match rules are supported.
2056 }
2057 
2058 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2059 
2060   // TODO
2061   // identify extra cases that we might want to provide match rules for
2062   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2063   bool ret_value = match_rule_supported(opcode);
2064   // Add rules here.
2065 
2066   return ret_value;  // Per default match rules are supported.
2067 }
2068 
// No predicated-vector support on this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2072 
// Use the shared default register-pressure threshold for floats unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2076 
// Not used on this port; calling it is an error.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2082 
2083 // Is this branch offset short enough that a short branch can be used?
2084 //
2085 // NOTE: If the platform does not provide any short branch variants, then
2086 //       this method should return false for offset 0.
2087 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2088   // The passed offset is relative to address of the branch.
2089 
2090   return (-32768 <= offset && offset < 32768);
2091 }
2092 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2098 
2099 // true just means we have fast l2f conversion
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2103 
2104 // Vector width in bytes.
2105 const int Matcher::vector_width_in_bytes(BasicType bt) {
2106   int size = MIN2(16,(int)MaxVectorSize);
2107   // Minimum 2 values in vector
2108   if (size < 2*type2aelembytes(bt)) size = 0;
2109   // But never < 4
2110   if (size < 4) size = 0;
2111   return size;
2112 }
2113 
2114 // Limits on vector size (number of elements) loaded into vector.
// Limits on vector size (number of elements) loaded into vector.
// Element count = vector width in bytes / element size in bytes.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2118 const int Matcher::min_vector_size(const BasicType bt) {
2119 //  For the moment limit the vector size to 8 bytes
2120     int size = 8 / type2aelembytes(bt);
2121     if (size < 2) size = 2;
2122     return size;
2123 }
2124 
2125 // Vector ideal reg.
2126 const uint Matcher::vector_ideal_reg(int len) {
2127   switch(len) {
2128     case  8: return Op_VecD;
2129     case 16: return Op_VecX;
2130   }
2131   ShouldNotReachHere();
2132   return 0;
2133 }
2134 
2135 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2136   switch(size) {
2137     case  8: return Op_VecD;
2138     case 16: return Op_VecX;
2139   }
2140   ShouldNotReachHere();
2141   return 0;
2142 }
2143 
2144 // AES support not yet implemented
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2148 
// Misaligned vector loads/stores are permitted unless the AlignVector
// flag requests strict alignment.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2153 
2154 // false => size gets scaled to BytesPerLong, ok.
2155 const bool Matcher::init_array_count_is_in_bytes = false;
2156 
2157 // Use conditional move (CMOVL)
// Use conditional move (CMOVL); zero extra cost relative to an int cmove.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}
2162 
// Zero extra cost for float conditional moves as well.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2167 
2168 // Does the CPU require late expand (see block.cpp for description of late expand)?
2169 const bool Matcher::require_postalloc_expand = false;
2170 
2171 // Do we need to mask the count passed to shift instructions or does
2172 // the cpu only look at the lower 5/6 bits anyway?
2173 const bool Matcher::need_masked_shift_count = false;
2174 
2175 // This affects two different things:
2176 //  - how Decode nodes are matched
2177 //  - how ImplicitNullCheck opportunities are recognized
2178 // If true, the matcher will try to remove all Decodes and match them
2179 // (as operands) into nodes. NullChecks are not prepared to deal with
2180 // Decodes by final_graph_reshaping().
2181 // If false, final_graph_reshaping() forces the decode behind the Cmp
2182 // for a NullCheck. The matcher matches the Decode node into a register.
2183 // Implicit_null_check optimization moves the Decode along with the
2184 // memory operation back up before the NullCheck.
// Only allow complex addressing with narrow oops when no shift is
// required to decode them.
bool Matcher::narrow_oop_use_complex_address() {
  return Universe::narrow_oop_shift() == 0;
}
2188 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2194 
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode
  // (i.e. when the heap base is NULL).
  return Universe::narrow_oop_base() == NULL;
}
2199 
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode
  // (i.e. when the klass base is NULL).
  return Universe::narrow_klass_base() == NULL;
}
2204 
2205 // Is it better to copy float constants, or load them directly from
2206 // memory?  Intel can load a float constant from a direct address,
2207 // requiring no extra registers.  Most RISCs will have to materialize
2208 // an address into a register first, so they would do better to copy
2209 // the constant from stack.
2210 const bool Matcher::rematerialize_float_constants = false;
2211 
2212 // If CPU can load and store mis-aligned doubles directly then no
2213 // fixup is needed.  Else we split the double into 2 integer pieces
2214 // and move it piece-by-piece.  Only happens when passing doubles into
2215 // C code as the Java calling convention forces doubles to be aligned.
2216 const bool Matcher::misaligned_doubles_ok = true;
2217 
// No implicit-null fixup is implemented on this port; calling this is
// an error.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2222 
2223 // Advertise here if the CPU requires explicit rounding operations to
2224 // implement the UseStrictFP mode.
2225 const bool Matcher::strict_fp_requires_explicit_rounding = false;
2226 
// Are floats converted to double when stored to stack during
// deoptimization?  No: floats are kept as 32-bit values.
bool Matcher::float_in_double() { return false; }
2230 
2231 // Do ints take an entire long register or just half?
2232 // The relevant question is how the int is callee-saved:
2233 // the whole long is written but de-opt'ing will have to extract
2234 // the relevant 32 bits.
2235 const bool Matcher::int_in_long = true;
2236 
2237 // Return whether or not this register is ever used as an argument.
2238 // This function is used on startup to build the trampoline stubs in
2239 // generateOptoStub.  Registers not mentioned will be killed by the VM
2240 // call in the trampoline, and arguments in those registers not be
2241 // available to the callee.
// Java arguments may arrive in r0..r7 (integral) or v0..v7 (floating);
// both halves of each register count.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
2262 
// Any register that can carry a Java argument may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2267 
// Never use a hand-written assembler stub for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2271 
// Register for DIVI projection of divmodI; never expected to be queried.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2276 
// Register for MODI projection of divmodI; never expected to be queried.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2282 
// Register for DIVL projection of divmodL; never expected to be queried.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2288 
// Register for MODL projection of divmodL; never expected to be queried.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2294 
// The SP saved across a method-handle invoke lives in the FP register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2298 
2299 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2300   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2301     Node* u = addp->fast_out(i);
2302     if (u->is_Mem()) {
2303       int opsize = u->as_Mem()->memory_size();
2304       assert(opsize > 0, "unexpected memory operand size");
2305       if (u->as_Mem()->memory_size() != (1<<shift)) {
2306         return false;
2307       }
2308     }
2309   }
2310   return true;
2311 }
2312 
2313 const bool Matcher::convi2l_type_required = false;
2314 
2315 // Should the Matcher clone shifts on addressing modes, expecting them
2316 // to be subsumed into complex addressing expressions or compute them
2317 // into registers?
// Try to fold the AddP's offset computation — either (LShiftL (ConvI2L x)
// con) or a bare (ConvI2L x) — into the users' addressing modes.  Returns
// true when the address expression was cloned into the matcher stack.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base-plus-offset shapes are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // (LShiftL (ConvI2L idx) con): only usable when every memory user
  // accesses 1<<con bytes, so the shift can become the address scale.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Bare (ConvI2L idx): subsume the sign-extension into the address.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2355 
// No platform-specific reshaping of AddP address expressions on this port.
void Compile::reshape_address(AddPNode* addp) {
}
2358 
2359 // helper for encoding java_to_runtime calls on sim
2360 //
2361 // this is needed to compute the extra arguments required when
2362 // planting a call to the simulator blrt instruction. the TypeFunc
2363 // can be queried to identify the counts for integral, and floating
2364 // arguments and the return type
2365 
2366 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
2367 {
2368   int gps = 0;
2369   int fps = 0;
2370   const TypeTuple *domain = tf->domain();
2371   int max = domain->cnt();
2372   for (int i = TypeFunc::Parms; i < max; i++) {
2373     const Type *t = domain->field_at(i);
2374     switch(t->basic_type()) {
2375     case T_FLOAT:
2376     case T_DOUBLE:
2377       fps++;
2378     default:
2379       gps++;
2380     }
2381   }
2382   gpcnt = gps;
2383   fpcnt = fps;
2384   BasicType rt = tf->return_type();
2385   switch (rt) {
2386   case T_VOID:
2387     rtype = MacroAssembler::ret_type_void;
2388     break;
2389   default:
2390     rtype = MacroAssembler::ret_type_integral;
2391     break;
2392   case T_FLOAT:
2393     rtype = MacroAssembler::ret_type_float;
2394     break;
2395   case T_DOUBLE:
2396     rtype = MacroAssembler::ret_type_double;
2397     break;
2398   }
2399 }
2400 
// Emit a volatile access INSN of REG at [BASE].  Volatile accesses only
// support a plain base register, so index/scale/displacement from the
// matched address must all be absent.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2409 
2410 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
2411 typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
2412 typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
2413                                   MacroAssembler::SIMD_RegVariant T, const Address &adr);
2414 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 means no index register: use base + displacement.
    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2445 
  // As above, but for float-register accesses (no I2L index variants).
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    // index == -1 means no index register: use base + displacement.
     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2468 
  // As above, but for SIMD/vector accesses; the index is always lsl-scaled.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    // index == -1 means no index register: use base + displacement.
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2480 
2481 %}
2482 
2483 
2484 
2485 //----------ENCODING BLOCK-----------------------------------------------------
2486 // This block specifies the encoding classes used by the compiler to
2487 // output byte streams.  Encoding classes are parameterized macros
2488 // used by Machine Instruction Nodes in order to generate the bit
2489 // encoding of the instruction.  Operands specify their base encoding
2490 // interface with the interface keyword.  There are currently
2491 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2492 // COND_INTER.  REG_INTER causes an operand to generate a function
2493 // which returns its register number when queried.  CONST_INTER causes
2494 // an operand to generate a function which returns the value of the
2495 // constant when queried.  MEMORY_INTER causes an operand to generate
2496 // four functions which return the Base Register, the Index Register,
2497 // the Scale Value, and the Offset Value of the operand when queried.
2498 // COND_INTER causes an operand to generate six functions which return
2499 // the encoding code (ie - encoding bits for the instruction)
2500 // associated with each basic boolean condition for a conditional
2501 // instruction.
2502 //
2503 // Instructions specify two basic values for encoding.  Again, a
2504 // function is available to check if the constant displacement is an
2505 // oop. They use the ins_encode keyword to specify their encoding
2506 // classes (which must be a sequence of enc_class names, and their
2507 // parameters, specified in the encoding block), and they use the
2508 // opcode keyword to specify, in order, their primary, secondary, and
2509 // tertiary opcode.  Only the opcode sections which a particular
2510 // instruction needs for encoding need to be specified.
2511 encode %{
2512   // Build emit functions for each basic byte or larger field in the
2513   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2514   // from C++ code in the enc_class source block.  Emit functions will
2515   // live in the main source block for now.  In future, we can
2516   // generalize this by adding a syntax that specifies the sizes of
2517   // fields in an order, so that the adlc can build the emit functions
2518   // automagically
2519 
2520   // catch all for unimplemented encodings
2521   enc_class enc_unimplemented %{
2522     MacroAssembler _masm(&cbuf);
2523     __ unimplemented("C2 catch all");
2524   %}
2525 
2526   // BEGIN Non-volatile memory access
2527 
2528   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
2529     Register dst_reg = as_Register($dst$$reg);
2530     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
2531                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2532   %}
2533 
2534   enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
2535     Register dst_reg = as_Register($dst$$reg);
2536     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
2537                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2538   %}
2539 
2540   enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
2541     Register dst_reg = as_Register($dst$$reg);
2542     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
2543                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2544   %}
2545 
2546   enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
2547     Register dst_reg = as_Register($dst$$reg);
2548     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
2549                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2550   %}
2551 
  // Non-volatile (plain) load encodings.  Each routes through the shared
  // loadStore() helper, which selects an addressing mode from the memory
  // operand's base/index/scale/disp components.

  // Load signed halfword, sign-extended into a 32-bit register (ldrshw).
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load signed halfword, sign-extended to the full register (ldrsh).
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load unsigned halfword, zero-extended (ldrh).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Long-destination variant; emission is identical to the iRegI form above.
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word (ldrw).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Long-destination variant of the 32-bit word load.
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, sign-extended to 64 bits (ldrsw).
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit doubleword (ldr).
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load single-precision float into an FP register (ldrs).
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load double-precision float into an FP register (ldrd).
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: S (32-bit), D (64-bit) and Q (128-bit) register widths.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2629 
  // Non-volatile store encodings.  The *0 variants store the zero register
  // (zr) instead of taking a source operand.

  // Store low byte of src (strb).
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero byte.
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero-byte store preceded by a StoreStore barrier, so prior stores are
  // visible before this one (NOTE(review): presumably for ordered card
  // marking -- confirm against the matching instruct).
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store low halfword of src (strh).
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero halfword.
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit word (strw).
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store a zero word.
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2672 
  // Store a 64-bit register (str).  sp cannot be used as a store source
  // operand on AArch64, so it is copied through rscratch2 first; that case
  // only arises when storing into the current thread (asserted below).
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2686 
  // Store a zero doubleword.
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store single-precision float from an FP register (strs).
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store double-precision float from an FP register (strd).
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S (32-bit), D (64-bit) and Q (128-bit) register widths.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2722 
2723   // END Non-volatile memory access
2724 
2725   // volatile loads and stores
2726 
  // Volatile store encodings: MOV_VOLATILE (defined earlier in this file)
  // resolves the memory operand's address -- using rscratch1 for any address
  // arithmetic -- and emits the given store-release instruction.

  // Store-release byte (stlrb).
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Store-release halfword (stlrh).
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Store-release word (stlrw).
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
2741 
2742 
  // Volatile load encodings: MOV_VOLATILE emits a load-acquire (ldar*) of
  // the resolved address.  The acquire instructions only zero-extend, so
  // the signed variants append an explicit sign-extension.  NOTE(review):
  // the bare __ after MOV_VOLATILE relies on the macro declaring _masm in
  // the enclosing scope -- see the macro definition earlier in the file.

  // Load-acquire byte, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, sign-extended to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, sign-extended to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, zero-extended (32-bit destination).
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword, zero-extended (64-bit destination).
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire word (32-bit destination).
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Long-destination variant; emission is identical to the iRegI form above.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Volatile float load: acquire into rscratch1, then move to the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: acquire into rscratch1, then move to the FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2817 
2818   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2819     Register src_reg = as_Register($src$$reg);
2820     // we sometimes get asked to store the stack pointer into the
2821     // current thread -- we cannot do that directly on AArch64
2822     if (src_reg == r31_sp) {
2823         MacroAssembler _masm(&cbuf);
2824       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2825       __ mov(rscratch2, sp);
2826       src_reg = rscratch2;
2827     }
2828     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2829                  rscratch1, stlr);
2830   %}
2831 
  // Volatile float store: move the FP value into rscratch2 (in a nested
  // scope so this _masm does not clash with the one MOV_VOLATILE declares),
  // then emit a word store-release.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: same scheme with a doubleword store-release.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2851 
2852   // synchronized read/update encodings
2853 
2854   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2855     MacroAssembler _masm(&cbuf);
2856     Register dst_reg = as_Register($dst$$reg);
2857     Register base = as_Register($mem$$base);
2858     int index = $mem$$index;
2859     int scale = $mem$$scale;
2860     int disp = $mem$$disp;
2861     if (index == -1) {
2862        if (disp != 0) {
2863         __ lea(rscratch1, Address(base, disp));
2864         __ ldaxr(dst_reg, rscratch1);
2865       } else {
2866         // TODO
2867         // should we ever get anything other than this case?
2868         __ ldaxr(dst_reg, base);
2869       }
2870     } else {
2871       Register index_reg = as_Register(index);
2872       if (disp == 0) {
2873         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2874         __ ldaxr(dst_reg, rscratch1);
2875       } else {
2876         __ lea(rscratch1, Address(base, disp));
2877         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2878         __ ldaxr(dst_reg, rscratch1);
2879       }
2880     }
2881   %}
2882 
2883   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2884     MacroAssembler _masm(&cbuf);
2885     Register src_reg = as_Register($src$$reg);
2886     Register base = as_Register($mem$$base);
2887     int index = $mem$$index;
2888     int scale = $mem$$scale;
2889     int disp = $mem$$disp;
2890     if (index == -1) {
2891        if (disp != 0) {
2892         __ lea(rscratch2, Address(base, disp));
2893         __ stlxr(rscratch1, src_reg, rscratch2);
2894       } else {
2895         // TODO
2896         // should we ever get anything other than this case?
2897         __ stlxr(rscratch1, src_reg, base);
2898       }
2899     } else {
2900       Register index_reg = as_Register(index);
2901       if (disp == 0) {
2902         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
2903         __ stlxr(rscratch1, src_reg, rscratch2);
2904       } else {
2905         __ lea(rscratch2, Address(base, disp));
2906         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
2907         __ stlxr(rscratch1, src_reg, rscratch2);
2908       }
2909     }
2910     __ cmpw(rscratch1, zr);
2911   %}
2912 
  // Compare-and-swap encodings with release-only ordering (the *_acq
  // variants below add acquire).  Matched patterns guarantee the memory
  // operand is a bare base register, hence the guarantee().  The result is
  // consumed via the flags (see aarch64_enc_cset_eq).

  // 64-bit (xword) CAS.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (word) CAS.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (halfword) CAS.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2944 
2945 
2946   // The only difference between aarch64_enc_cmpxchg and
2947   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
2948   // CompareAndSwap sequence to serve as a barrier on acquiring a
2949   // lock.
  // 64-bit (xword) CAS with acquire+release ordering.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit (word) CAS with acquire+release ordering.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit (halfword) CAS with acquire+release ordering.
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit (byte) CAS with acquire+release ordering.
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2981 
2982   // auxiliary used for CompareAndSwapX to set result register
2983   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
2984     MacroAssembler _masm(&cbuf);
2985     Register res_reg = as_Register($res$$reg);
2986     __ cset(res_reg, Assembler::EQ);
2987   %}
2988 
2989   // prefetch encodings
2990 
  // Prefetch for write (prfm PSTL1KEEP).  prfm has no
  // base+disp+index addressing form, so when both a displacement and an
  // index are present the displacement is folded into rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3009 
  /// mov encodings
3011 
  // Move a 32-bit immediate into a register; zero is moved from zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a register; zero is moved from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3033 
  // Move a pointer constant, dispatching on its relocation type: oops and
  // metadata get relocated moves; unrelocated addresses in the first page
  // are moved directly, and anything else uses a PC-relative adrp+add pair.
  // Null and the value 1 are matched by dedicated encodings (mov_p0/mov_p1),
  // so reaching here with them is a matcher bug.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3058 
3059   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3060     MacroAssembler _masm(&cbuf);
3061     Register dst_reg = as_Register($dst$$reg);
3062     __ mov(dst_reg, zr);
3063   %}
3064 
3065   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3066     MacroAssembler _masm(&cbuf);
3067     Register dst_reg = as_Register($dst$$reg);
3068     __ mov(dst_reg, (u_int64_t)1);
3069   %}
3070 
  // Load the address of the safepoint polling page with a poll_type-relocated
  // adrp; the page is aligned, so the low-order offset must come out zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Load the card table's byte map base into dst.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
3084 
  // Move a narrow (compressed) oop constant; must carry an oop relocation.
  // Null is matched by mov_n0, so a NULL constant here is a matcher bug.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow null: zero the destination.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant; must carry a metadata
  // relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3116 
3117   // arithmetic encodings
3118 
  // Immediate add/subtract, 32-bit.  The instruct's primary opcode selects
  // the operation (0 = add, 1 = subtract); a negative effective constant is
  // flipped so the assembler always receives a non-negative immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // Immediate add/subtract, 64-bit.  The immLAddSub operand restricts the
  // constant to the add/sub immediate range, so the int32_t narrowing and
  // negation below are safe.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3146 
  // Integer division/remainder via the corrected_idiv helpers, which handle
  // the Java corner cases (division by zero deopt path, MIN_VALUE / -1).
  // The boolean argument selects quotient (false) or remainder (true).

  // 32-bit divide.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder.
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
3178 
3179   // compare instruction encodings
3180 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare with an add/sub-range immediate: encoded as a flag-setting
  // subtract (or add for a negative constant) against zr.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare with an arbitrary immediate: materialize it in rscratch1
  // first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare with a 12-bit-range immediate.  Long.MIN_VALUE is the one
  // value whose negation is itself (val != -val fails), so it is routed
  // through a scratch register instead of being negated.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare with an arbitrary immediate, via rscratch1.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (full-width).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow (compressed) pointer compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer null test: compare against zr.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow pointer null test: 32-bit compare against zr.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3262 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; the cmpOpU operand supplies an unsigned
  // condition code, emission is otherwise identical.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
3280 
  // Slow-path subtype check: scans super_reg's secondary supers list.
  // Condition codes are set for the caller; when the instruct's primary
  // opcode is set, result_reg is additionally zeroed on success (the miss
  // label leaves it untouched).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3298 
  // Static Java call.  Runtime-wrapper targets (no _method) get a plain
  // runtime-call trampoline; real Java targets get a relocated trampoline
  // call plus a to-interpreter stub.  Any allocation failure bails out the
  // compilation with "CodeCache is full".
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  // Dynamic (inline-cache) Java call; bails out if the ic_call cannot be
  // emitted.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
3335 
3336   enc_class aarch64_enc_call_epilog() %{
3337     MacroAssembler _masm(&cbuf);
3338     if (VerifyStackAtCalls) {
3339       // Check that stack depth is unchanged: find majik cookie on stack
3340       __ call_Unimplemented();
3341     }
3342   %}
3343 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Native entry: push a return-address breadcrumb for the stack walker,
      // then make an indirect call with the target's calling convention.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3374 
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    // Jump to the shared rethrow stub; it may be out of direct branch
    // range, so use a far jump.
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3379 
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    // Return to the address held in the link register.
    __ ret(lr);
  %}
3384 
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    // Tail call: transfer control to the target without creating a new
    // frame; the current frame has already been dismantled.
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3390 
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    // Forward an in-flight exception to the handler in jump_target.
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3400 
3401   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3402     MacroAssembler _masm(&cbuf);
3403     Register oop = as_Register($object$$reg);
3404     Register box = as_Register($box$$reg);
3405     Register disp_hdr = as_Register($tmp$$reg);
3406     Register tmp = as_Register($tmp2$$reg);
3407     Label cont;
3408     Label object_has_monitor;
3409     Label cas_failed;
3410 
3411     assert_different_registers(oop, box, tmp, disp_hdr);
3412 
3413     // Load markOop from object into displaced_header.
3414     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
3415 
3416     if (UseBiasedLocking && !UseOptoBiasInlining) {
3417       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
3418     }
3419 
3420     // Handle existing monitor
3421     __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
3422 
3423     // Set tmp to be (markOop of object | UNLOCK_VALUE).
3424     __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
3425 
3426     // Load Compare Value application register.
3427 
3428     // Initialize the box. (Must happen before we update the object mark!)
3429     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3430 
3431     // Compare object markOop with an unlocked value (tmp) and if
3432     // equal exchange the stack address of our box with object markOop.
3433     // On failure disp_hdr contains the possibly locked markOop.
3434     if (UseLSE) {
3435       __ mov(disp_hdr, tmp);
3436       __ casal(Assembler::xword, disp_hdr, box, oop);  // Updates disp_hdr
3437       __ cmp(tmp, disp_hdr);
3438       __ br(Assembler::EQ, cont);
3439     } else {
3440       Label retry_load;
3441       if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
3442         __ prfm(Address(oop), PSTL1STRM);
3443       __ bind(retry_load);
3444       __ ldaxr(disp_hdr, oop);
3445       __ cmp(tmp, disp_hdr);
3446       __ br(Assembler::NE, cas_failed);
3447       // use stlxr to ensure update is immediately visible
3448       __ stlxr(disp_hdr, box, oop);
3449       __ cbzw(disp_hdr, cont);
3450       __ b(retry_load);
3451     }
3452 
3453     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3454 
3455     // If the compare-and-exchange succeeded, then we found an unlocked
3456     // object, will have now locked it will continue at label cont
3457 
3458     __ bind(cas_failed);
3459     // We did not see an unlocked object so try the fast recursive case.
3460 
3461     // Check if the owner is self by comparing the value in the
3462     // markOop of object (disp_hdr) with the stack pointer.
3463     __ mov(rscratch1, sp);
3464     __ sub(disp_hdr, disp_hdr, rscratch1);
3465     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3466     // If condition is true we are cont and hence we can store 0 as the
3467     // displaced header in the box, which indicates that it is a recursive lock.
3468     __ ands(tmp/*==0?*/, disp_hdr, tmp);
3469     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3470 
3471     // Handle existing monitor.
3472     __ b(cont);
3473 
3474     __ bind(object_has_monitor);
3475     // The object's monitor m is unlocked iff m->owner == NULL,
3476     // otherwise m->owner may contain a thread or a stack address.
3477     //
3478     // Try to CAS m->owner from NULL to current thread.
3479     __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
3480     __ mov(disp_hdr, zr);
3481 
3482     if (UseLSE) {
3483       __ mov(rscratch1, disp_hdr);
3484       __ casal(Assembler::xword, rscratch1, rthread, tmp);
3485       __ cmp(rscratch1, disp_hdr);
3486     } else {
3487       Label retry_load, fail;
3488       if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
3489         __ prfm(Address(tmp), PSTL1STRM);
3490       }
3491       __ bind(retry_load);
3492       __ ldaxr(rscratch1, tmp);
3493       __ cmp(disp_hdr, rscratch1);
3494       __ br(Assembler::NE, fail);
3495       // use stlxr to ensure update is immediately visible
3496       __ stlxr(rscratch1, rthread, tmp);
3497       __ cbnzw(rscratch1, retry_load);
3498       __ bind(fail);
3499     }
3500 
3501     // Store a non-null value into the box to avoid looking like a re-entrant
3502     // lock. The fast-path monitor unlock code checks for
3503     // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
3504     // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
3505     __ mov(tmp, (address)markOopDesc::unused_mark());
3506     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3507 
3508     __ bind(cont);
3509     // flag == EQ indicates success
3510     // flag == NE indicates failure
3511   %}
3512 
3513   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3514     MacroAssembler _masm(&cbuf);
3515     Register oop = as_Register($object$$reg);
3516     Register box = as_Register($box$$reg);
3517     Register disp_hdr = as_Register($tmp$$reg);
3518     Register tmp = as_Register($tmp2$$reg);
3519     Label cont;
3520     Label object_has_monitor;
3521 
3522     assert_different_registers(oop, box, tmp, disp_hdr);
3523 
3524     if (UseBiasedLocking && !UseOptoBiasInlining) {
3525       __ biased_locking_exit(oop, tmp, cont);
3526     }
3527 
3528     // Find the lock address and load the displaced header from the stack.
3529     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3530 
3531     // If the displaced header is 0, we have a recursive unlock.
3532     __ cmp(disp_hdr, zr);
3533     __ br(Assembler::EQ, cont);
3534 
3535     // Handle existing monitor.
3536     __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
3537     __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
3538 
3539     // Check if it is still a light weight lock, this is is true if we
3540     // see the stack address of the basicLock in the markOop of the
3541     // object.
3542 
3543     if (UseLSE) {
3544       __ mov(tmp, box);
3545       __ casl(Assembler::xword, tmp, disp_hdr, oop);
3546       __ cmp(tmp, box);
3547       __ b(cont);
3548     } else {
3549       Label retry_load;
3550       if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
3551         __ prfm(Address(oop), PSTL1STRM);
3552       __ bind(retry_load);
3553       __ ldxr(tmp, oop);
3554       __ cmp(box, tmp);
3555       __ br(Assembler::NE, cont);
3556       // use stlxr to ensure update is immediately visible
3557       __ stlxr(tmp, disp_hdr, oop);
3558       __ cbzw(tmp, cont);
3559       __ b(retry_load);
3560     }
3561 
3562     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3563 
3564     // Handle existing monitor.
3565     __ bind(object_has_monitor);
3566     __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
3567     __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
3568     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
3569     __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
3570     __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
3571     __ cmp(rscratch1, zr);
3572     __ br(Assembler::NE, cont);
3573 
3574     __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
3575     __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
3576     __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
3577     __ cmp(rscratch1, zr);
3578     __ cbnz(rscratch1, cont);
3579     // need a release store here
3580     __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
3581     __ stlr(zr, tmp); // set unowned
3582 
3583     __ bind(cont);
3584     // flag == EQ indicates success
3585     // flag == NE indicates failure
3586   %}
3587 
3588 %}
3589 
3590 //----------FRAME--------------------------------------------------------------
3591 // Definition of frame structure and management information.
3592 //
3593 //  S T A C K   L A Y O U T    Allocators stack-slot number
3594 //                             |   (to get allocators register number
3595 //  G  Owned by    |        |  v    add OptoReg::stack0())
3596 //  r   CALLER     |        |
3597 //  o     |        +--------+      pad to even-align allocators stack-slot
3598 //  w     V        |  pad0  |        numbers; owned by CALLER
3599 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3600 //  h     ^        |   in   |  5
3601 //        |        |  args  |  4   Holes in incoming args owned by SELF
3602 //  |     |        |        |  3
3603 //  |     |        +--------+
3604 //  V     |        | old out|      Empty on Intel, window on Sparc
3605 //        |    old |preserve|      Must be even aligned.
3606 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3607 //        |        |   in   |  3   area for Intel ret address
3608 //     Owned by    |preserve|      Empty on Sparc.
3609 //       SELF      +--------+
3610 //        |        |  pad2  |  2   pad to align old SP
3611 //        |        +--------+  1
3612 //        |        | locks  |  0
3613 //        |        +--------+----> OptoReg::stack0(), even aligned
3614 //        |        |  pad1  | 11   pad to align new SP
3615 //        |        +--------+
3616 //        |        |        | 10
3617 //        |        | spills |  9   spills
3618 //        V        |        |  8   (pad0 slot for callee)
3619 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3620 //        ^        |  out   |  7
3621 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3622 //     Owned by    +--------+
3623 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3624 //        |    new |preserve|      Must be even-aligned.
3625 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3626 //        |        |        |
3627 //
3628 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3629 //         known from SELF's arguments and the Java calling convention.
3630 //         Region 6-7 is determined per call site.
3631 // Note 2: If the calling convention leaves holes in the incoming argument
3632 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3634 //         incoming area, as the Java calling convention is completely under
3635 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3637 //         varargs C calling conventions.
3638 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3639 //         even aligned with pad0 as needed.
3640 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3641 //           (the latter is true on Intel but is it false on AArch64?)
3642 //         region 6-11 is even aligned; it may be padded out more so that
3643 //         the region from SP to FP meets the minimum stack alignment.
3644 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3645 //         alignment.  Region 11, pad1, may be dynamically extended so that
3646 //         SP meets the minimum alignment.
3647 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, so just pass false.
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low halves of the return register pair, indexed by ideal type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High halves; OptoReg::Bad for 32-bit values with no high half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3751 
3752 //----------ATTRIBUTES---------------------------------------------------------
3753 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3769 
3770 //----------OPERANDS-----------------------------------------------------------
3771 // Operand definitions must precede instruction definitions for correct parsing
3772 // in the ADLC because operands constitute user defined types which are used in
3773 // instruction definitions.
3774 
3775 //----------Simple Operands----------------------------------------------------
3776 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3861 
// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3951 
// 64 bit constant 255 (low-byte mask)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3981 
// Long immediate of the form 2^k - 1 (a contiguous run of low-order
// one bits) with the top two bits clear.
operand immL_bitmask()
%{
  // Uppercase suffix: a lowercase 'l' is easily misread as the digit 1.
  // The literal exceeds LONG_MAX, so its type (unsigned long) and value
  // are unchanged by this spelling.
  predicate(((n->get_long() & 0xc000000000000000UL) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3992 
// Int immediate of the form 2^k - 1 (a contiguous run of low-order
// one bits) with the top two bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4003 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long form)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 4-byte (size shift 2) accesses
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 8-byte (size shift 3) accesses
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 16-byte (size shift 4) accesses
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for scaled or unscaled immediate loads and stores
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 4-byte (size shift 2) accesses
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 8-byte (size shift 3) accesses
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset for 16-byte (size shift 4) accesses
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4138 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset of last_Java_pc within the thread's frame anchor.

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4247 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4329 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an fmov immediate (packed form)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as an fmov immediate (packed form)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4390 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4421 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  // NOTE(review): unlike every sibling register operand there is no
  // op_cost(0) here, so the ADLC default cost applies — confirm intentional
  format %{ %}
  interface(REG_INTER);
%}
4465 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4582 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4637 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4682 
4683 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4743 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, 64-bit (D) form
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, 128-bit (X/Q) form
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4823 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4863 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter Method OOP Register
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4905 
//----------Memory Operands----------------------------------------------------

// Simple register-indirect addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32-bit index, scaled: [reg, ireg sxtw #scale]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  // only if the scaled index is legal for every memory use of this address
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus 64-bit index, scaled: [reg, lreg lsl #scale]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  // only if the scaled index is legal for every memory use of this address
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Base plus sign-extended 32-bit index, unscaled: [reg, ireg sxtw]
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Base plus 64-bit index, unscaled: [reg, lreg]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
4979 
// Base plus immediate offset addressing: [reg, #off]. The 4/8/16 variants
// use offset immediates restricted to values encodable for accesses of
// that size (see the corresponding immIOffset*/immLoffset* operands).
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// As above but with a long offset immediate
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5091 
// Narrow-oop addressing forms: the base register holds a compressed oop.
// All of these are restricted by predicate to Universe::narrow_oop_shift()
// == 0, i.e. when decoding involves no shift (see the DecodeN match rules).
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5196 
5197 
5198 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// (address formed as thread register + fixed pc-slot offset)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5213 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// NOTE(review): base(0x1e) is annotated "RSP" below — an x86 register name;
// on AArch64 this encoding should denote the stack pointer. Also only
// stackSlotP carries op_cost(100); the I/F/D/L variants fall back to the
// ADLC default. Confirm both are intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5288 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// (hex values are the AArch64 condition-code encodings)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5344 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  // restricted to eq/ne so the match can become a cbz/cbnz-style branch
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  // lt/ge against zero can become a tbz/tbnz test of the sign bit
  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5420 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Memory-operand groupings accepted by vector load/store rules, split by
// the immediate-offset alignment they can encode (4, 8 or 16 bytes).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5437 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5465 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map A53-style stage names onto the generic S0..S3 stages declared below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5475 
5476 // Integer ALU reg operation
5477 pipeline %{
5478 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5491 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 means "either issue slot"; ALU means "either ALU"
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5512 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// FP two-source arithmetic, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-source arithmetic, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5572 
// FP convert float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
5644 
5645 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
5646 %{
5647   single_instruction;
5648   src1   : S1(read);
5649   src2   : S2(read);
5650   dst    : S5(write);
5651   INS0   : ISS;
5652   NEON_FP : S5;
5653 %}
5654 
5655 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
5656 %{
5657   single_instruction;
5658   src1   : S1(read);
5659   src2   : S2(read);
5660   dst    : S5(write);
5661   INS0   : ISS;
5662   NEON_FP : S5;
5663 %}
5664 
5665 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
5666 %{
5667   single_instruction;
5668   cr     : S1(read);
5669   src1   : S1(read);
5670   src2   : S1(read);
5671   dst    : S3(write);
5672   INS01  : ISS;
5673   NEON_FP : S3;
5674 %}
5675 
5676 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
5677 %{
5678   single_instruction;
5679   cr     : S1(read);
5680   src1   : S1(read);
5681   src2   : S1(read);
5682   dst    : S3(write);
5683   INS01  : ISS;
5684   NEON_FP : S3;
5685 %}
5686 
5687 pipe_class fp_imm_s(vRegF dst)
5688 %{
5689   single_instruction;
5690   dst    : S3(write);
5691   INS01  : ISS;
5692   NEON_FP : S3;
5693 %}
5694 
5695 pipe_class fp_imm_d(vRegD dst)
5696 %{
5697   single_instruction;
5698   dst    : S3(write);
5699   INS01  : ISS;
5700   NEON_FP : S3;
5701 %}
5702 
5703 pipe_class fp_load_constant_s(vRegF dst)
5704 %{
5705   single_instruction;
5706   dst    : S4(write);
5707   INS01  : ISS;
5708   NEON_FP : S4;
5709 %}
5710 
5711 pipe_class fp_load_constant_d(vRegD dst)
5712 %{
5713   single_instruction;
5714   dst    : S4(write);
5715   INS01  : ISS;
5716   NEON_FP : S4;
5717 %}
5718 
5719 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
5720 %{
5721   single_instruction;
5722   dst    : S5(write);
5723   src1   : S1(read);
5724   src2   : S1(read);
5725   INS01  : ISS;
5726   NEON_FP : S5;
5727 %}
5728 
5729 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
5730 %{
5731   single_instruction;
5732   dst    : S5(write);
5733   src1   : S1(read);
5734   src2   : S1(read);
5735   INS0   : ISS;
5736   NEON_FP : S5;
5737 %}
5738 
// 64-bit vector multiply-accumulate (e.g. MLA Vd.8B, Vn.8B, Vm.8B).
// Note: dst appears twice — it is read as the accumulator input at S1
// and written with the result at S5.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);  // accumulator operand: old dst value is an input
  INS01  : ISS;
  NEON_FP : S5;
%}
5749 
// 128-bit vector multiply-accumulate (e.g. MLA Vd.16B, Vn.16B, Vm.16B).
// As vmla64, but can only issue in slot 0 (INS0) — presumably because the
// 128-bit NEON op occupies both halves of the FP pipe.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);  // accumulator operand: old dst value is an input
  INS0   : ISS;
  NEON_FP : S5;
%}
5760 
5761 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
5762 %{
5763   single_instruction;
5764   dst    : S4(write);
5765   src1   : S2(read);
5766   src2   : S2(read);
5767   INS01  : ISS;
5768   NEON_FP : S4;
5769 %}
5770 
5771 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
5772 %{
5773   single_instruction;
5774   dst    : S4(write);
5775   src1   : S2(read);
5776   src2   : S2(read);
5777   INS0   : ISS;
5778   NEON_FP : S4;
5779 %}
5780 
5781 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
5782 %{
5783   single_instruction;
5784   dst    : S3(write);
5785   src1   : S2(read);
5786   src2   : S2(read);
5787   INS01  : ISS;
5788   NEON_FP : S3;
5789 %}
5790 
5791 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
5792 %{
5793   single_instruction;
5794   dst    : S3(write);
5795   src1   : S2(read);
5796   src2   : S2(read);
5797   INS0   : ISS;
5798   NEON_FP : S3;
5799 %}
5800 
5801 pipe_class vshift64(vecD dst, vecD src, vecX shift)
5802 %{
5803   single_instruction;
5804   dst    : S3(write);
5805   src    : S1(read);
5806   shift  : S1(read);
5807   INS01  : ISS;
5808   NEON_FP : S3;
5809 %}
5810 
5811 pipe_class vshift128(vecX dst, vecX src, vecX shift)
5812 %{
5813   single_instruction;
5814   dst    : S3(write);
5815   src    : S1(read);
5816   shift  : S1(read);
5817   INS0   : ISS;
5818   NEON_FP : S3;
5819 %}
5820 
5821 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
5822 %{
5823   single_instruction;
5824   dst    : S3(write);
5825   src    : S1(read);
5826   INS01  : ISS;
5827   NEON_FP : S3;
5828 %}
5829 
5830 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
5831 %{
5832   single_instruction;
5833   dst    : S3(write);
5834   src    : S1(read);
5835   INS0   : ISS;
5836   NEON_FP : S3;
5837 %}
5838 
5839 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
5840 %{
5841   single_instruction;
5842   dst    : S5(write);
5843   src1   : S1(read);
5844   src2   : S1(read);
5845   INS01  : ISS;
5846   NEON_FP : S5;
5847 %}
5848 
5849 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
5850 %{
5851   single_instruction;
5852   dst    : S5(write);
5853   src1   : S1(read);
5854   src2   : S1(read);
5855   INS0   : ISS;
5856   NEON_FP : S5;
5857 %}
5858 
5859 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
5860 %{
5861   single_instruction;
5862   dst    : S5(write);
5863   src1   : S1(read);
5864   src2   : S1(read);
5865   INS0   : ISS;
5866   NEON_FP : S5;
5867 %}
5868 
5869 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
5870 %{
5871   single_instruction;
5872   dst    : S5(write);
5873   src1   : S1(read);
5874   src2   : S1(read);
5875   INS0   : ISS;
5876   NEON_FP : S5;
5877 %}
5878 
5879 pipe_class vsqrt_fp128(vecX dst, vecX src)
5880 %{
5881   single_instruction;
5882   dst    : S5(write);
5883   src    : S1(read);
5884   INS0   : ISS;
5885   NEON_FP : S5;
5886 %}
5887 
5888 pipe_class vunop_fp64(vecD dst, vecD src)
5889 %{
5890   single_instruction;
5891   dst    : S5(write);
5892   src    : S1(read);
5893   INS01  : ISS;
5894   NEON_FP : S5;
5895 %}
5896 
5897 pipe_class vunop_fp128(vecX dst, vecX src)
5898 %{
5899   single_instruction;
5900   dst    : S5(write);
5901   src    : S1(read);
5902   INS0   : ISS;
5903   NEON_FP : S5;
5904 %}
5905 
5906 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
5907 %{
5908   single_instruction;
5909   dst    : S3(write);
5910   src    : S1(read);
5911   INS01  : ISS;
5912   NEON_FP : S3;
5913 %}
5914 
5915 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
5916 %{
5917   single_instruction;
5918   dst    : S3(write);
5919   src    : S1(read);
5920   INS01  : ISS;
5921   NEON_FP : S3;
5922 %}
5923 
5924 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
5925 %{
5926   single_instruction;
5927   dst    : S3(write);
5928   src    : S1(read);
5929   INS01  : ISS;
5930   NEON_FP : S3;
5931 %}
5932 
5933 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
5934 %{
5935   single_instruction;
5936   dst    : S3(write);
5937   src    : S1(read);
5938   INS01  : ISS;
5939   NEON_FP : S3;
5940 %}
5941 
5942 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
5943 %{
5944   single_instruction;
5945   dst    : S3(write);
5946   src    : S1(read);
5947   INS01  : ISS;
5948   NEON_FP : S3;
5949 %}
5950 
5951 pipe_class vmovi_reg_imm64(vecD dst)
5952 %{
5953   single_instruction;
5954   dst    : S3(write);
5955   INS01  : ISS;
5956   NEON_FP : S3;
5957 %}
5958 
5959 pipe_class vmovi_reg_imm128(vecX dst)
5960 %{
5961   single_instruction;
5962   dst    : S3(write);
5963   INS0   : ISS;
5964   NEON_FP : S3;
5965 %}
5966 
5967 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
5968 %{
5969   single_instruction;
5970   dst    : S5(write);
5971   mem    : ISS(read);
5972   INS01  : ISS;
5973   NEON_FP : S3;
5974 %}
5975 
5976 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
5977 %{
5978   single_instruction;
5979   dst    : S5(write);
5980   mem    : ISS(read);
5981   INS01  : ISS;
5982   NEON_FP : S3;
5983 %}
5984 
5985 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
5986 %{
5987   single_instruction;
5988   mem    : ISS(read);
5989   src    : S2(read);
5990   INS01  : ISS;
5991   NEON_FP : S3;
5992 %}
5993 
// 128-bit vector store.
// Fixed operand type: this is the 128-bit variant, so src is vecX, not vecD
// (was a copy-paste of vstore_reg_mem64; every other *128 class uses vecX).
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
6002 
6003 //------- Integer ALU operations --------------------------
6004 
6005 // Integer ALU reg-reg operation
6006 // Operands needed in EX1, result generated in EX2
6007 // Eg.  ADD     x0, x1, x2
6008 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6009 %{
6010   single_instruction;
6011   dst    : EX2(write);
6012   src1   : EX1(read);
6013   src2   : EX1(read);
6014   INS01  : ISS; // Dual issue as instruction 0 or 1
6015   ALU    : EX2;
6016 %}
6017 
6018 // Integer ALU reg-reg operation with constant shift
6019 // Shifted register must be available in LATE_ISS instead of EX1
6020 // Eg.  ADD     x0, x1, x2, LSL #2
6021 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6022 %{
6023   single_instruction;
6024   dst    : EX2(write);
6025   src1   : EX1(read);
6026   src2   : ISS(read);
6027   INS01  : ISS;
6028   ALU    : EX2;
6029 %}
6030 
6031 // Integer ALU reg operation with constant shift
6032 // Eg.  LSL     x0, x1, #shift
6033 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6034 %{
6035   single_instruction;
6036   dst    : EX2(write);
6037   src1   : ISS(read);
6038   INS01  : ISS;
6039   ALU    : EX2;
6040 %}
6041 
6042 // Integer ALU reg-reg operation with variable shift
6043 // Both operands must be available in LATE_ISS instead of EX1
6044 // Result is available in EX1 instead of EX2
6045 // Eg.  LSLV    x0, x1, x2
6046 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6047 %{
6048   single_instruction;
6049   dst    : EX1(write);
6050   src1   : ISS(read);
6051   src2   : ISS(read);
6052   INS01  : ISS;
6053   ALU    : EX1;
6054 %}
6055 
// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): the comment above and dst's EX2(write) say the
                // result is produced in EX2, yet the ALU resource is booked at
                // EX1 (cf. ialu_reg_reg, which uses ALU : EX2) — confirm which
                // stage is intended.
%}
6068 
6069 // Integer ALU reg operation
6070 // Eg.  NEG     x0, x1
6071 pipe_class ialu_reg(iRegI dst, iRegI src)
6072 %{
6073   single_instruction;
6074   dst    : EX2(write);
6075   src    : EX1(read);
6076   INS01  : ISS;
6077   ALU    : EX2;
6078 %}
6079 
// Integer ALU reg immediate operation
6081 // Eg.  ADD     x0, x1, #N
6082 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6083 %{
6084   single_instruction;
6085   dst    : EX2(write);
6086   src1   : EX1(read);
6087   INS01  : ISS;
6088   ALU    : EX2;
6089 %}
6090 
6091 // Integer ALU immediate operation (no source operands)
6092 // Eg.  MOV     x0, #N
6093 pipe_class ialu_imm(iRegI dst)
6094 %{
6095   single_instruction;
6096   dst    : EX1(write);
6097   INS01  : ISS;
6098   ALU    : EX1;
6099 %}
6100 
6101 //------- Compare operation -------------------------------
6102 
6103 // Compare reg-reg
6104 // Eg.  CMP     x0, x1
6105 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6106 %{
6107   single_instruction;
6108 //  fixed_latency(16);
6109   cr     : EX2(write);
6110   op1    : EX1(read);
6111   op2    : EX1(read);
6112   INS01  : ISS;
6113   ALU    : EX2;
6114 %}
6115 
// Compare reg-immediate
// Eg.  CMP     x0, #N
6118 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6119 %{
6120   single_instruction;
6121 //  fixed_latency(16);
6122   cr     : EX2(write);
6123   op1    : EX1(read);
6124   INS01  : ISS;
6125   ALU    : EX2;
6126 %}
6127 
6128 //------- Conditional instructions ------------------------
6129 
6130 // Conditional no operands
6131 // Eg.  CSINC   x0, zr, zr, <cond>
6132 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6133 %{
6134   single_instruction;
6135   cr     : EX1(read);
6136   dst    : EX2(write);
6137   INS01  : ISS;
6138   ALU    : EX2;
6139 %}
6140 
6141 // Conditional 2 operand
6142 // EG.  CSEL    X0, X1, X2, <cond>
6143 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6144 %{
6145   single_instruction;
6146   cr     : EX1(read);
6147   src1   : EX1(read);
6148   src2   : EX1(read);
6149   dst    : EX2(write);
6150   INS01  : ISS;
6151   ALU    : EX2;
6152 %}
6153 
// Conditional 1 operand
// EG.  CSINC   x0, x1, x1, <cond>
6156 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6157 %{
6158   single_instruction;
6159   cr     : EX1(read);
6160   src    : EX1(read);
6161   dst    : EX2(write);
6162   INS01  : ISS;
6163   ALU    : EX2;
6164 %}
6165 
6166 //------- Multiply pipeline operations --------------------
6167 
6168 // Multiply reg-reg
6169 // Eg.  MUL     w0, w1, w2
6170 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6171 %{
6172   single_instruction;
6173   dst    : WR(write);
6174   src1   : ISS(read);
6175   src2   : ISS(read);
6176   INS01  : ISS;
6177   MAC    : WR;
6178 %}
6179 
6180 // Multiply accumulate
6181 // Eg.  MADD    w0, w1, w2, w3
6182 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6183 %{
6184   single_instruction;
6185   dst    : WR(write);
6186   src1   : ISS(read);
6187   src2   : ISS(read);
6188   src3   : ISS(read);
6189   INS01  : ISS;
6190   MAC    : WR;
6191 %}
6192 
// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
6194 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6195 %{
6196   single_instruction;
6197   fixed_latency(3); // Maximum latency for 64 bit mul
6198   dst    : WR(write);
6199   src1   : ISS(read);
6200   src2   : ISS(read);
6201   INS01  : ISS;
6202   MAC    : WR;
6203 %}
6204 
// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
6207 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6208 %{
6209   single_instruction;
6210   fixed_latency(3); // Maximum latency for 64 bit mul
6211   dst    : WR(write);
6212   src1   : ISS(read);
6213   src2   : ISS(read);
6214   src3   : ISS(read);
6215   INS01  : ISS;
6216   MAC    : WR;
6217 %}
6218 
6219 //------- Divide pipeline operations --------------------
6220 
6221 // Eg.  SDIV    w0, w1, w2
6222 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6223 %{
6224   single_instruction;
6225   fixed_latency(8); // Maximum latency for 32 bit divide
6226   dst    : WR(write);
6227   src1   : ISS(read);
6228   src2   : ISS(read);
6229   INS0   : ISS; // Can only dual issue as instruction 0
6230   DIV    : WR;
6231 %}
6232 
6233 // Eg.  SDIV    x0, x1, x2
6234 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6235 %{
6236   single_instruction;
6237   fixed_latency(16); // Maximum latency for 64 bit divide
6238   dst    : WR(write);
6239   src1   : ISS(read);
6240   src2   : ISS(read);
6241   INS0   : ISS; // Can only dual issue as instruction 0
6242   DIV    : WR;
6243 %}
6244 
6245 //------- Load pipeline operations ------------------------
6246 
6247 // Load - prefetch
6248 // Eg.  PFRM    <mem>
6249 pipe_class iload_prefetch(memory mem)
6250 %{
6251   single_instruction;
6252   mem    : ISS(read);
6253   INS01  : ISS;
6254   LDST   : WR;
6255 %}
6256 
6257 // Load - reg, mem
6258 // Eg.  LDR     x0, <mem>
6259 pipe_class iload_reg_mem(iRegI dst, memory mem)
6260 %{
6261   single_instruction;
6262   dst    : WR(write);
6263   mem    : ISS(read);
6264   INS01  : ISS;
6265   LDST   : WR;
6266 %}
6267 
6268 // Load - reg, reg
6269 // Eg.  LDR     x0, [sp, x1]
6270 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6271 %{
6272   single_instruction;
6273   dst    : WR(write);
6274   src    : ISS(read);
6275   INS01  : ISS;
6276   LDST   : WR;
6277 %}
6278 
6279 //------- Store pipeline operations -----------------------
6280 
6281 // Store - zr, mem
6282 // Eg.  STR     zr, <mem>
6283 pipe_class istore_mem(memory mem)
6284 %{
6285   single_instruction;
6286   mem    : ISS(read);
6287   INS01  : ISS;
6288   LDST   : WR;
6289 %}
6290 
6291 // Store - reg, mem
6292 // Eg.  STR     x0, <mem>
6293 pipe_class istore_reg_mem(iRegI src, memory mem)
6294 %{
6295   single_instruction;
6296   mem    : ISS(read);
6297   src    : EX2(read);
6298   INS01  : ISS;
6299   LDST   : WR;
6300 %}
6301 
6302 // Store - reg, reg
6303 // Eg. STR      x0, [sp, x1]
6304 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6305 %{
6306   single_instruction;
6307   dst    : ISS(read);
6308   src    : EX2(read);
6309   INS01  : ISS;
6310   LDST   : WR;
6311 %}
6312 
//------- Branch pipeline operations ----------------------
6314 
6315 // Branch
6316 pipe_class pipe_branch()
6317 %{
6318   single_instruction;
6319   INS01  : ISS;
6320   BRANCH : EX1;
6321 %}
6322 
6323 // Conditional branch
6324 pipe_class pipe_branch_cond(rFlagsReg cr)
6325 %{
6326   single_instruction;
6327   cr     : EX1(read);
6328   INS01  : ISS;
6329   BRANCH : EX1;
6330 %}
6331 
6332 // Compare & Branch
6333 // EG.  CBZ/CBNZ
6334 pipe_class pipe_cmp_branch(iRegI op1)
6335 %{
6336   single_instruction;
6337   op1    : EX1(read);
6338   INS01  : ISS;
6339   BRANCH : EX1;
6340 %}
6341 
6342 //------- Synchronisation operations ----------------------
6343 
6344 // Any operation requiring serialization.
6345 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
6346 pipe_class pipe_serial()
6347 %{
6348   single_instruction;
6349   force_serialization;
6350   fixed_latency(16);
6351   INS01  : ISS(2); // Cannot dual issue with any other instruction
6352   LDST   : WR;
6353 %}
6354 
6355 // Generic big/slow expanded idiom - also serialized
6356 pipe_class pipe_slow()
6357 %{
6358   instruction_count(10);
6359   multiple_bundles;
6360   force_serialization;
6361   fixed_latency(16);
6362   INS01  : ISS(2); // Cannot dual issue with any other instruction
6363   LDST   : WR;
6364 %}
6365 
6366 // Empty pipeline class
6367 pipe_class pipe_class_empty()
6368 %{
6369   single_instruction;
6370   fixed_latency(0);
6371 %}
6372 
6373 // Default pipeline class.
6374 pipe_class pipe_class_default()
6375 %{
6376   single_instruction;
6377   fixed_latency(2);
6378 %}
6379 
6380 // Pipeline class for compares.
6381 pipe_class pipe_class_compare()
6382 %{
6383   single_instruction;
6384   fixed_latency(16);
6385 %}
6386 
6387 // Pipeline class for memory operations.
6388 pipe_class pipe_class_memory()
6389 %{
6390   single_instruction;
6391   fixed_latency(16);
6392 %}
6393 
6394 // Pipeline class for call.
6395 pipe_class pipe_class_call()
6396 %{
6397   single_instruction;
6398   fixed_latency(100);
6399 %}
6400 
6401 // Define the class for the Nop node.
6402 define %{
6403    MachNop = pipe_class_empty;
6404 %}
6405 
6406 %}
6407 //----------INSTRUCTIONS-------------------------------------------------------
6408 //
6409 // match      -- States which machine-independent subtree may be replaced
6410 //               by this instruction.
6411 // ins_cost   -- The estimated cost of this instruction is used by instruction
6412 //               selection to identify a minimum cost tree of machine
6413 //               instructions that matches a tree of machine-independent
6414 //               instructions.
6415 // format     -- A string providing the disassembly for this instruction.
6416 //               The value of an instruction's operand may be inserted
6417 //               by referring to it with a '$' prefix.
6418 // opcode     -- Three instruction opcodes may be provided.  These are referred
6419 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6421 //               indicate the type of machine instruction, while secondary
6422 //               and tertiary are often used for prefix options or addressing
6423 //               modes.
6424 // ins_encode -- A list of encode classes with parameters. The encode class
6425 //               name must have been defined in an 'enc_class' specification
6426 //               in the encode section of the architecture description.
6427 
6428 // ============================================================================
6429 // Memory (Load/Store) Instructions
6430 
6431 // Load Instructions
6432 
6433 // Load Byte (8 bit signed)
6434 instruct loadB(iRegINoSp dst, memory mem)
6435 %{
6436   match(Set dst (LoadB mem));
6437   predicate(!needs_acquiring_load(n));
6438 
6439   ins_cost(4 * INSN_COST);
6440   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6441 
6442   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6443 
6444   ins_pipe(iload_reg_mem);
6445 %}
6446 
6447 // Load Byte (8 bit signed) into long
6448 instruct loadB2L(iRegLNoSp dst, memory mem)
6449 %{
6450   match(Set dst (ConvI2L (LoadB mem)));
6451   predicate(!needs_acquiring_load(n->in(1)));
6452 
6453   ins_cost(4 * INSN_COST);
6454   format %{ "ldrsb  $dst, $mem\t# byte" %}
6455 
6456   ins_encode(aarch64_enc_ldrsb(dst, mem));
6457 
6458   ins_pipe(iload_reg_mem);
6459 %}
6460 
6461 // Load Byte (8 bit unsigned)
6462 instruct loadUB(iRegINoSp dst, memory mem)
6463 %{
6464   match(Set dst (LoadUB mem));
6465   predicate(!needs_acquiring_load(n));
6466 
6467   ins_cost(4 * INSN_COST);
6468   format %{ "ldrbw  $dst, $mem\t# byte" %}
6469 
6470   ins_encode(aarch64_enc_ldrb(dst, mem));
6471 
6472   ins_pipe(iload_reg_mem);
6473 %}
6474 
6475 // Load Byte (8 bit unsigned) into long
6476 instruct loadUB2L(iRegLNoSp dst, memory mem)
6477 %{
6478   match(Set dst (ConvI2L (LoadUB mem)));
6479   predicate(!needs_acquiring_load(n->in(1)));
6480 
6481   ins_cost(4 * INSN_COST);
6482   format %{ "ldrb  $dst, $mem\t# byte" %}
6483 
6484   ins_encode(aarch64_enc_ldrb(dst, mem));
6485 
6486   ins_pipe(iload_reg_mem);
6487 %}
6488 
6489 // Load Short (16 bit signed)
6490 instruct loadS(iRegINoSp dst, memory mem)
6491 %{
6492   match(Set dst (LoadS mem));
6493   predicate(!needs_acquiring_load(n));
6494 
6495   ins_cost(4 * INSN_COST);
6496   format %{ "ldrshw  $dst, $mem\t# short" %}
6497 
6498   ins_encode(aarch64_enc_ldrshw(dst, mem));
6499 
6500   ins_pipe(iload_reg_mem);
6501 %}
6502 
6503 // Load Short (16 bit signed) into long
6504 instruct loadS2L(iRegLNoSp dst, memory mem)
6505 %{
6506   match(Set dst (ConvI2L (LoadS mem)));
6507   predicate(!needs_acquiring_load(n->in(1)));
6508 
6509   ins_cost(4 * INSN_COST);
6510   format %{ "ldrsh  $dst, $mem\t# short" %}
6511 
6512   ins_encode(aarch64_enc_ldrsh(dst, mem));
6513 
6514   ins_pipe(iload_reg_mem);
6515 %}
6516 
6517 // Load Char (16 bit unsigned)
6518 instruct loadUS(iRegINoSp dst, memory mem)
6519 %{
6520   match(Set dst (LoadUS mem));
6521   predicate(!needs_acquiring_load(n));
6522 
6523   ins_cost(4 * INSN_COST);
6524   format %{ "ldrh  $dst, $mem\t# short" %}
6525 
6526   ins_encode(aarch64_enc_ldrh(dst, mem));
6527 
6528   ins_pipe(iload_reg_mem);
6529 %}
6530 
6531 // Load Short/Char (16 bit unsigned) into long
6532 instruct loadUS2L(iRegLNoSp dst, memory mem)
6533 %{
6534   match(Set dst (ConvI2L (LoadUS mem)));
6535   predicate(!needs_acquiring_load(n->in(1)));
6536 
6537   ins_cost(4 * INSN_COST);
6538   format %{ "ldrh  $dst, $mem\t# short" %}
6539 
6540   ins_encode(aarch64_enc_ldrh(dst, mem));
6541 
6542   ins_pipe(iload_reg_mem);
6543 %}
6544 
6545 // Load Integer (32 bit signed)
6546 instruct loadI(iRegINoSp dst, memory mem)
6547 %{
6548   match(Set dst (LoadI mem));
6549   predicate(!needs_acquiring_load(n));
6550 
6551   ins_cost(4 * INSN_COST);
6552   format %{ "ldrw  $dst, $mem\t# int" %}
6553 
6554   ins_encode(aarch64_enc_ldrw(dst, mem));
6555 
6556   ins_pipe(iload_reg_mem);
6557 %}
6558 
6559 // Load Integer (32 bit signed) into long
6560 instruct loadI2L(iRegLNoSp dst, memory mem)
6561 %{
6562   match(Set dst (ConvI2L (LoadI mem)));
6563   predicate(!needs_acquiring_load(n->in(1)));
6564 
6565   ins_cost(4 * INSN_COST);
6566   format %{ "ldrsw  $dst, $mem\t# int" %}
6567 
6568   ins_encode(aarch64_enc_ldrsw(dst, mem));
6569 
6570   ins_pipe(iload_reg_mem);
6571 %}
6572 
6573 // Load Integer (32 bit unsigned) into long
6574 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6575 %{
6576   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6577   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6578 
6579   ins_cost(4 * INSN_COST);
6580   format %{ "ldrw  $dst, $mem\t# int" %}
6581 
6582   ins_encode(aarch64_enc_ldrw(dst, mem));
6583 
6584   ins_pipe(iload_reg_mem);
6585 %}
6586 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed disassembly comment: this loads a 64-bit long, not an int.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6600 
6601 // Load Range
6602 instruct loadRange(iRegINoSp dst, memory mem)
6603 %{
6604   match(Set dst (LoadRange mem));
6605 
6606   ins_cost(4 * INSN_COST);
6607   format %{ "ldrw  $dst, $mem\t# range" %}
6608 
6609   ins_encode(aarch64_enc_ldrw(dst, mem));
6610 
6611   ins_pipe(iload_reg_mem);
6612 %}
6613 
6614 // Load Pointer
6615 instruct loadP(iRegPNoSp dst, memory mem)
6616 %{
6617   match(Set dst (LoadP mem));
6618   predicate(!needs_acquiring_load(n));
6619 
6620   ins_cost(4 * INSN_COST);
6621   format %{ "ldr  $dst, $mem\t# ptr" %}
6622 
6623   ins_encode(aarch64_enc_ldr(dst, mem));
6624 
6625   ins_pipe(iload_reg_mem);
6626 %}
6627 
6628 // Load Compressed Pointer
6629 instruct loadN(iRegNNoSp dst, memory mem)
6630 %{
6631   match(Set dst (LoadN mem));
6632   predicate(!needs_acquiring_load(n));
6633 
6634   ins_cost(4 * INSN_COST);
6635   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6636 
6637   ins_encode(aarch64_enc_ldrw(dst, mem));
6638 
6639   ins_pipe(iload_reg_mem);
6640 %}
6641 
6642 // Load Klass Pointer
6643 instruct loadKlass(iRegPNoSp dst, memory mem)
6644 %{
6645   match(Set dst (LoadKlass mem));
6646   predicate(!needs_acquiring_load(n));
6647 
6648   ins_cost(4 * INSN_COST);
6649   format %{ "ldr  $dst, $mem\t# class" %}
6650 
6651   ins_encode(aarch64_enc_ldr(dst, mem));
6652 
6653   ins_pipe(iload_reg_mem);
6654 %}
6655 
6656 // Load Narrow Klass Pointer
6657 instruct loadNKlass(iRegNNoSp dst, memory mem)
6658 %{
6659   match(Set dst (LoadNKlass mem));
6660   predicate(!needs_acquiring_load(n));
6661 
6662   ins_cost(4 * INSN_COST);
6663   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6664 
6665   ins_encode(aarch64_enc_ldrw(dst, mem));
6666 
6667   ins_pipe(iload_reg_mem);
6668 %}
6669 
6670 // Load Float
6671 instruct loadF(vRegF dst, memory mem)
6672 %{
6673   match(Set dst (LoadF mem));
6674   predicate(!needs_acquiring_load(n));
6675 
6676   ins_cost(4 * INSN_COST);
6677   format %{ "ldrs  $dst, $mem\t# float" %}
6678 
6679   ins_encode( aarch64_enc_ldrs(dst, mem) );
6680 
6681   ins_pipe(pipe_class_memory);
6682 %}
6683 
6684 // Load Double
6685 instruct loadD(vRegD dst, memory mem)
6686 %{
6687   match(Set dst (LoadD mem));
6688   predicate(!needs_acquiring_load(n));
6689 
6690   ins_cost(4 * INSN_COST);
6691   format %{ "ldrd  $dst, $mem\t# double" %}
6692 
6693   ins_encode( aarch64_enc_ldrd(dst, mem) );
6694 
6695   ins_pipe(pipe_class_memory);
6696 %}
6697 
6698 
6699 // Load Int Constant
6700 instruct loadConI(iRegINoSp dst, immI src)
6701 %{
6702   match(Set dst src);
6703 
6704   ins_cost(INSN_COST);
6705   format %{ "mov $dst, $src\t# int" %}
6706 
6707   ins_encode( aarch64_enc_movw_imm(dst, src) );
6708 
6709   ins_pipe(ialu_imm);
6710 %}
6711 
6712 // Load Long Constant
6713 instruct loadConL(iRegLNoSp dst, immL src)
6714 %{
6715   match(Set dst src);
6716 
6717   ins_cost(INSN_COST);
6718   format %{ "mov $dst, $src\t# long" %}
6719 
6720   ins_encode( aarch64_enc_mov_imm(dst, src) );
6721 
6722   ins_pipe(ialu_imm);
6723 %}
6724 
6725 // Load Pointer Constant
6726 
6727 instruct loadConP(iRegPNoSp dst, immP con)
6728 %{
6729   match(Set dst con);
6730 
6731   ins_cost(INSN_COST * 4);
6732   format %{
6733     "mov  $dst, $con\t# ptr\n\t"
6734   %}
6735 
6736   ins_encode(aarch64_enc_mov_p(dst, con));
6737 
6738   ins_pipe(ialu_imm);
6739 %}
6740 
6741 // Load Null Pointer Constant
6742 
6743 instruct loadConP0(iRegPNoSp dst, immP0 con)
6744 %{
6745   match(Set dst con);
6746 
6747   ins_cost(INSN_COST);
6748   format %{ "mov  $dst, $con\t# NULL ptr" %}
6749 
6750   ins_encode(aarch64_enc_mov_p0(dst, con));
6751 
6752   ins_pipe(ialu_imm);
6753 %}
6754 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed disassembly comment: immP_1 is the pointer constant one, not NULL
  // (the old text was copy-pasted from loadConP0).
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6768 
6769 // Load Poll Page Constant
6770 
6771 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6772 %{
6773   match(Set dst con);
6774 
6775   ins_cost(INSN_COST);
6776   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6777 
6778   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6779 
6780   ins_pipe(ialu_imm);
6781 %}
6782 
6783 // Load Byte Map Base Constant
6784 
6785 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6786 %{
6787   match(Set dst con);
6788 
6789   ins_cost(INSN_COST);
6790   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6791 
6792   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6793 
6794   ins_pipe(ialu_imm);
6795 %}
6796 
6797 // Load Narrow Pointer Constant
6798 
6799 instruct loadConN(iRegNNoSp dst, immN con)
6800 %{
6801   match(Set dst con);
6802 
6803   ins_cost(INSN_COST * 4);
6804   format %{ "mov  $dst, $con\t# compressed ptr" %}
6805 
6806   ins_encode(aarch64_enc_mov_n(dst, con));
6807 
6808   ins_pipe(ialu_imm);
6809 %}
6810 
6811 // Load Narrow Null Pointer Constant
6812 
6813 instruct loadConN0(iRegNNoSp dst, immN0 con)
6814 %{
6815   match(Set dst con);
6816 
6817   ins_cost(INSN_COST);
6818   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6819 
6820   ins_encode(aarch64_enc_mov_n0(dst, con));
6821 
6822   ins_pipe(ialu_imm);
6823 %}
6824 
6825 // Load Narrow Klass Constant
6826 
6827 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6828 %{
6829   match(Set dst con);
6830 
6831   ins_cost(INSN_COST);
6832   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6833 
6834   ins_encode(aarch64_enc_mov_nk(dst, con));
6835 
6836   ins_pipe(ialu_imm);
6837 %}
6838 
6839 // Load Packed Float Constant
6840 
6841 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6842   match(Set dst con);
6843   ins_cost(INSN_COST * 4);
6844   format %{ "fmovs  $dst, $con"%}
6845   ins_encode %{
6846     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6847   %}
6848 
6849   ins_pipe(fp_imm_s);
6850 %}
6851 
6852 // Load Float Constant
6853 
6854 instruct loadConF(vRegF dst, immF con) %{
6855   match(Set dst con);
6856 
6857   ins_cost(INSN_COST * 4);
6858 
6859   format %{
6860     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6861   %}
6862 
6863   ins_encode %{
6864     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6865   %}
6866 
6867   ins_pipe(fp_load_constant_s);
6868 %}
6869 
6870 // Load Packed Double Constant
6871 
6872 instruct loadConD_packed(vRegD dst, immDPacked con) %{
6873   match(Set dst con);
6874   ins_cost(INSN_COST);
6875   format %{ "fmovd  $dst, $con"%}
6876   ins_encode %{
6877     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
6878   %}
6879 
6880   ins_pipe(fp_imm_d);
6881 %}
6882 
6883 // Load Double Constant
6884 
// Load a double constant from the constant table.
// Fix: the format string previously said "float=$con" for this double load.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6899 
6900 // Store Instructions
6901 
6902 // Store CMS card-mark Immediate
6903 instruct storeimmCM0(immI0 zero, memory mem)
6904 %{
6905   match(Set mem (StoreCM mem zero));
6906   predicate(unnecessary_storestore(n));
6907 
6908   ins_cost(INSN_COST);
6909   format %{ "storestore (elided)\n\t"
6910             "strb zr, $mem\t# byte" %}
6911 
6912   ins_encode(aarch64_enc_strb0(mem));
6913 
6914   ins_pipe(istore_mem);
6915 %}
6916 
6917 // Store CMS card-mark Immediate with intervening StoreStore
6918 // needed when using CMS with no conditional card marking
6919 instruct storeimmCM0_ordered(immI0 zero, memory mem)
6920 %{
6921   match(Set mem (StoreCM mem zero));
6922 
6923   ins_cost(INSN_COST * 2);
6924   format %{ "storestore\n\t"
6925             "dmb ishst"
6926             "\n\tstrb zr, $mem\t# byte" %}
6927 
6928   ins_encode(aarch64_enc_strb0_ordered(mem));
6929 
6930   ins_pipe(istore_mem);
6931 %}
6932 
6933 // Store Byte
6934 instruct storeB(iRegIorL2I src, memory mem)
6935 %{
6936   match(Set mem (StoreB mem src));
6937   predicate(!needs_releasing_store(n));
6938 
6939   ins_cost(INSN_COST);
6940   format %{ "strb  $src, $mem\t# byte" %}
6941 
6942   ins_encode(aarch64_enc_strb(src, mem));
6943 
6944   ins_pipe(istore_reg_mem);
6945 %}
6946 
6947 
// Store zero byte. The aarch64_enc_strb0 encoding stores the zero
// register (zr), so the format string must show "strb zr" — it
// previously showed a misspelled "rscractch2", which never matched
// what the encoding emits (compare storeimmCM0 / storeimmC0).
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
6960 
6961 // Store Char/Short
6962 instruct storeC(iRegIorL2I src, memory mem)
6963 %{
6964   match(Set mem (StoreC mem src));
6965   predicate(!needs_releasing_store(n));
6966 
6967   ins_cost(INSN_COST);
6968   format %{ "strh  $src, $mem\t# short" %}
6969 
6970   ins_encode(aarch64_enc_strh(src, mem));
6971 
6972   ins_pipe(istore_reg_mem);
6973 %}
6974 
6975 instruct storeimmC0(immI0 zero, memory mem)
6976 %{
6977   match(Set mem (StoreC mem zero));
6978   predicate(!needs_releasing_store(n));
6979 
6980   ins_cost(INSN_COST);
6981   format %{ "strh  zr, $mem\t# short" %}
6982 
6983   ins_encode(aarch64_enc_strh0(mem));
6984 
6985   ins_pipe(istore_mem);
6986 %}
6987 
6988 // Store Integer
6989 
6990 instruct storeI(iRegIorL2I src, memory mem)
6991 %{
6992   match(Set mem(StoreI mem src));
6993   predicate(!needs_releasing_store(n));
6994 
6995   ins_cost(INSN_COST);
6996   format %{ "strw  $src, $mem\t# int" %}
6997 
6998   ins_encode(aarch64_enc_strw(src, mem));
6999 
7000   ins_pipe(istore_reg_mem);
7001 %}
7002 
7003 instruct storeimmI0(immI0 zero, memory mem)
7004 %{
7005   match(Set mem(StoreI mem zero));
7006   predicate(!needs_releasing_store(n));
7007 
7008   ins_cost(INSN_COST);
7009   format %{ "strw  zr, $mem\t# int" %}
7010 
7011   ins_encode(aarch64_enc_strw0(mem));
7012 
7013   ins_pipe(istore_mem);
7014 %}
7015 
7016 // Store Long (64 bit signed)
// Store Long (64 bit signed). Format comment corrected from "# int"
// to "# long" — this is a 64-bit str.
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7029 
// Store Zero Long (64 bit signed)
// Store zero long via the zero register. Format comment corrected
// from "# int" to "# long" — this is a 64-bit str of zr.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7043 
7044 // Store Pointer
7045 instruct storeP(iRegP src, memory mem)
7046 %{
7047   match(Set mem (StoreP mem src));
7048   predicate(!needs_releasing_store(n));
7049 
7050   ins_cost(INSN_COST);
7051   format %{ "str  $src, $mem\t# ptr" %}
7052 
7053   ins_encode(aarch64_enc_str(src, mem));
7054 
7055   ins_pipe(istore_reg_mem);
7056 %}
7057 
// Store Null Pointer
7059 instruct storeimmP0(immP0 zero, memory mem)
7060 %{
7061   match(Set mem (StoreP mem zero));
7062   predicate(!needs_releasing_store(n));
7063 
7064   ins_cost(INSN_COST);
7065   format %{ "str zr, $mem\t# ptr" %}
7066 
7067   ins_encode(aarch64_enc_str0(mem));
7068 
7069   ins_pipe(istore_mem);
7070 %}
7071 
7072 // Store Compressed Pointer
7073 instruct storeN(iRegN src, memory mem)
7074 %{
7075   match(Set mem (StoreN mem src));
7076   predicate(!needs_releasing_store(n));
7077 
7078   ins_cost(INSN_COST);
7079   format %{ "strw  $src, $mem\t# compressed ptr" %}
7080 
7081   ins_encode(aarch64_enc_strw(src, mem));
7082 
7083   ins_pipe(istore_reg_mem);
7084 %}
7085 
7086 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
7087 %{
7088   match(Set mem (StoreN mem zero));
7089   predicate(Universe::narrow_oop_base() == NULL &&
7090             Universe::narrow_klass_base() == NULL &&
7091             (!needs_releasing_store(n)));
7092 
7093   ins_cost(INSN_COST);
7094   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
7095 
7096   ins_encode(aarch64_enc_strw(heapbase, mem));
7097 
7098   ins_pipe(istore_reg_mem);
7099 %}
7100 
7101 // Store Float
7102 instruct storeF(vRegF src, memory mem)
7103 %{
7104   match(Set mem (StoreF mem src));
7105   predicate(!needs_releasing_store(n));
7106 
7107   ins_cost(INSN_COST);
7108   format %{ "strs  $src, $mem\t# float" %}
7109 
7110   ins_encode( aarch64_enc_strs(src, mem) );
7111 
7112   ins_pipe(pipe_class_memory);
7113 %}
7114 
7115 // TODO
7116 // implement storeImmF0 and storeFImmPacked
7117 
7118 // Store Double
7119 instruct storeD(vRegD src, memory mem)
7120 %{
7121   match(Set mem (StoreD mem src));
7122   predicate(!needs_releasing_store(n));
7123 
7124   ins_cost(INSN_COST);
7125   format %{ "strd  $src, $mem\t# double" %}
7126 
7127   ins_encode( aarch64_enc_strd(src, mem) );
7128 
7129   ins_pipe(pipe_class_memory);
7130 %}
7131 
7132 // Store Compressed Klass Pointer
7133 instruct storeNKlass(iRegN src, memory mem)
7134 %{
7135   predicate(!needs_releasing_store(n));
7136   match(Set mem (StoreNKlass mem src));
7137 
7138   ins_cost(INSN_COST);
7139   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7140 
7141   ins_encode(aarch64_enc_strw(src, mem));
7142 
7143   ins_pipe(istore_reg_mem);
7144 %}
7145 
7146 // TODO
7147 // implement storeImmD0 and storeDImmPacked
7148 
7149 // prefetch instructions
7150 // Must be safe to execute with invalid address (cannot fault).
7151 
7152 instruct prefetchalloc( memory mem ) %{
7153   match(PrefetchAllocation mem);
7154 
7155   ins_cost(INSN_COST);
7156   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7157 
7158   ins_encode( aarch64_enc_prefetchw(mem) );
7159 
7160   ins_pipe(iload_prefetch);
7161 %}
7162 
7163 //  ---------------- volatile loads and stores ----------------
7164 
7165 // Load Byte (8 bit signed)
7166 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7167 %{
7168   match(Set dst (LoadB mem));
7169 
7170   ins_cost(VOLATILE_REF_COST);
7171   format %{ "ldarsb  $dst, $mem\t# byte" %}
7172 
7173   ins_encode(aarch64_enc_ldarsb(dst, mem));
7174 
7175   ins_pipe(pipe_serial);
7176 %}
7177 
7178 // Load Byte (8 bit signed) into long
7179 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7180 %{
7181   match(Set dst (ConvI2L (LoadB mem)));
7182 
7183   ins_cost(VOLATILE_REF_COST);
7184   format %{ "ldarsb  $dst, $mem\t# byte" %}
7185 
7186   ins_encode(aarch64_enc_ldarsb(dst, mem));
7187 
7188   ins_pipe(pipe_serial);
7189 %}
7190 
7191 // Load Byte (8 bit unsigned)
7192 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7193 %{
7194   match(Set dst (LoadUB mem));
7195 
7196   ins_cost(VOLATILE_REF_COST);
7197   format %{ "ldarb  $dst, $mem\t# byte" %}
7198 
7199   ins_encode(aarch64_enc_ldarb(dst, mem));
7200 
7201   ins_pipe(pipe_serial);
7202 %}
7203 
7204 // Load Byte (8 bit unsigned) into long
7205 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7206 %{
7207   match(Set dst (ConvI2L (LoadUB mem)));
7208 
7209   ins_cost(VOLATILE_REF_COST);
7210   format %{ "ldarb  $dst, $mem\t# byte" %}
7211 
7212   ins_encode(aarch64_enc_ldarb(dst, mem));
7213 
7214   ins_pipe(pipe_serial);
7215 %}
7216 
7217 // Load Short (16 bit signed)
7218 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7219 %{
7220   match(Set dst (LoadS mem));
7221 
7222   ins_cost(VOLATILE_REF_COST);
7223   format %{ "ldarshw  $dst, $mem\t# short" %}
7224 
7225   ins_encode(aarch64_enc_ldarshw(dst, mem));
7226 
7227   ins_pipe(pipe_serial);
7228 %}
7229 
7230 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7231 %{
7232   match(Set dst (LoadUS mem));
7233 
7234   ins_cost(VOLATILE_REF_COST);
7235   format %{ "ldarhw  $dst, $mem\t# short" %}
7236 
7237   ins_encode(aarch64_enc_ldarhw(dst, mem));
7238 
7239   ins_pipe(pipe_serial);
7240 %}
7241 
7242 // Load Short/Char (16 bit unsigned) into long
7243 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
7244 %{
7245   match(Set dst (ConvI2L (LoadUS mem)));
7246 
7247   ins_cost(VOLATILE_REF_COST);
7248   format %{ "ldarh  $dst, $mem\t# short" %}
7249 
7250   ins_encode(aarch64_enc_ldarh(dst, mem));
7251 
7252   ins_pipe(pipe_serial);
7253 %}
7254 
7255 // Load Short/Char (16 bit signed) into long
// Volatile load of a signed short into a long.
// Fix: the format string said "ldarh" (zero-extending) while the
// encoding is aarch64_enc_ldarsh (sign-extending); the debug output
// now matches the emitted instruction.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7267 
7268 // Load Integer (32 bit signed)
7269 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7270 %{
7271   match(Set dst (LoadI mem));
7272 
7273   ins_cost(VOLATILE_REF_COST);
7274   format %{ "ldarw  $dst, $mem\t# int" %}
7275 
7276   ins_encode(aarch64_enc_ldarw(dst, mem));
7277 
7278   ins_pipe(pipe_serial);
7279 %}
7280 
7281 // Load Integer (32 bit unsigned) into long
7282 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
7283 %{
7284   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7285 
7286   ins_cost(VOLATILE_REF_COST);
7287   format %{ "ldarw  $dst, $mem\t# int" %}
7288 
7289   ins_encode(aarch64_enc_ldarw(dst, mem));
7290 
7291   ins_pipe(pipe_serial);
7292 %}
7293 
7294 // Load Long (64 bit signed)
// Volatile load of a 64-bit long using load-acquire (ldar).
// Format comment corrected from "# int" to "# long".
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7306 
7307 // Load Pointer
7308 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
7309 %{
7310   match(Set dst (LoadP mem));
7311 
7312   ins_cost(VOLATILE_REF_COST);
7313   format %{ "ldar  $dst, $mem\t# ptr" %}
7314 
7315   ins_encode(aarch64_enc_ldar(dst, mem));
7316 
7317   ins_pipe(pipe_serial);
7318 %}
7319 
7320 // Load Compressed Pointer
7321 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7322 %{
7323   match(Set dst (LoadN mem));
7324 
7325   ins_cost(VOLATILE_REF_COST);
7326   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
7327 
7328   ins_encode(aarch64_enc_ldarw(dst, mem));
7329 
7330   ins_pipe(pipe_serial);
7331 %}
7332 
7333 // Load Float
7334 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
7335 %{
7336   match(Set dst (LoadF mem));
7337 
7338   ins_cost(VOLATILE_REF_COST);
7339   format %{ "ldars  $dst, $mem\t# float" %}
7340 
7341   ins_encode( aarch64_enc_fldars(dst, mem) );
7342 
7343   ins_pipe(pipe_serial);
7344 %}
7345 
7346 // Load Double
7347 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
7348 %{
7349   match(Set dst (LoadD mem));
7350 
7351   ins_cost(VOLATILE_REF_COST);
7352   format %{ "ldard  $dst, $mem\t# double" %}
7353 
7354   ins_encode( aarch64_enc_fldard(dst, mem) );
7355 
7356   ins_pipe(pipe_serial);
7357 %}
7358 
7359 // Store Byte
7360 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7361 %{
7362   match(Set mem (StoreB mem src));
7363 
7364   ins_cost(VOLATILE_REF_COST);
7365   format %{ "stlrb  $src, $mem\t# byte" %}
7366 
7367   ins_encode(aarch64_enc_stlrb(src, mem));
7368 
7369   ins_pipe(pipe_class_memory);
7370 %}
7371 
7372 // Store Char/Short
7373 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7374 %{
7375   match(Set mem (StoreC mem src));
7376 
7377   ins_cost(VOLATILE_REF_COST);
7378   format %{ "stlrh  $src, $mem\t# short" %}
7379 
7380   ins_encode(aarch64_enc_stlrh(src, mem));
7381 
7382   ins_pipe(pipe_class_memory);
7383 %}
7384 
7385 // Store Integer
7386 
7387 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
7388 %{
7389   match(Set mem(StoreI mem src));
7390 
7391   ins_cost(VOLATILE_REF_COST);
7392   format %{ "stlrw  $src, $mem\t# int" %}
7393 
7394   ins_encode(aarch64_enc_stlrw(src, mem));
7395 
7396   ins_pipe(pipe_class_memory);
7397 %}
7398 
7399 // Store Long (64 bit signed)
// Volatile store of a 64-bit long using store-release (stlr).
// Format comment corrected from "# int" to "# long".
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7411 
7412 // Store Pointer
7413 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
7414 %{
7415   match(Set mem (StoreP mem src));
7416 
7417   ins_cost(VOLATILE_REF_COST);
7418   format %{ "stlr  $src, $mem\t# ptr" %}
7419 
7420   ins_encode(aarch64_enc_stlr(src, mem));
7421 
7422   ins_pipe(pipe_class_memory);
7423 %}
7424 
7425 // Store Compressed Pointer
7426 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
7427 %{
7428   match(Set mem (StoreN mem src));
7429 
7430   ins_cost(VOLATILE_REF_COST);
7431   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
7432 
7433   ins_encode(aarch64_enc_stlrw(src, mem));
7434 
7435   ins_pipe(pipe_class_memory);
7436 %}
7437 
7438 // Store Float
7439 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
7440 %{
7441   match(Set mem (StoreF mem src));
7442 
7443   ins_cost(VOLATILE_REF_COST);
7444   format %{ "stlrs  $src, $mem\t# float" %}
7445 
7446   ins_encode( aarch64_enc_fstlrs(src, mem) );
7447 
7448   ins_pipe(pipe_class_memory);
7449 %}
7450 
7451 // TODO
7452 // implement storeImmF0 and storeFImmPacked
7453 
7454 // Store Double
7455 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
7456 %{
7457   match(Set mem (StoreD mem src));
7458 
7459   ins_cost(VOLATILE_REF_COST);
7460   format %{ "stlrd  $src, $mem\t# double" %}
7461 
7462   ins_encode( aarch64_enc_fstlrd(src, mem) );
7463 
7464   ins_pipe(pipe_class_memory);
7465 %}
7466 
7467 //  ---------------- end of volatile loads and stores ----------------
7468 
7469 // ============================================================================
7470 // BSWAP Instructions
7471 
7472 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7473   match(Set dst (ReverseBytesI src));
7474 
7475   ins_cost(INSN_COST);
7476   format %{ "revw  $dst, $src" %}
7477 
7478   ins_encode %{
7479     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7480   %}
7481 
7482   ins_pipe(ialu_reg);
7483 %}
7484 
7485 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7486   match(Set dst (ReverseBytesL src));
7487 
7488   ins_cost(INSN_COST);
7489   format %{ "rev  $dst, $src" %}
7490 
7491   ins_encode %{
7492     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7493   %}
7494 
7495   ins_pipe(ialu_reg);
7496 %}
7497 
7498 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7499   match(Set dst (ReverseBytesUS src));
7500 
7501   ins_cost(INSN_COST);
7502   format %{ "rev16w  $dst, $src" %}
7503 
7504   ins_encode %{
7505     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7506   %}
7507 
7508   ins_pipe(ialu_reg);
7509 %}
7510 
7511 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7512   match(Set dst (ReverseBytesS src));
7513 
7514   ins_cost(INSN_COST);
7515   format %{ "rev16w  $dst, $src\n\t"
7516             "sbfmw $dst, $dst, #0, #15" %}
7517 
7518   ins_encode %{
7519     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7520     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7521   %}
7522 
7523   ins_pipe(ialu_reg);
7524 %}
7525 
7526 // ============================================================================
7527 // Zero Count Instructions
7528 
7529 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7530   match(Set dst (CountLeadingZerosI src));
7531 
7532   ins_cost(INSN_COST);
7533   format %{ "clzw  $dst, $src" %}
7534   ins_encode %{
7535     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7536   %}
7537 
7538   ins_pipe(ialu_reg);
7539 %}
7540 
7541 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7542   match(Set dst (CountLeadingZerosL src));
7543 
7544   ins_cost(INSN_COST);
7545   format %{ "clz   $dst, $src" %}
7546   ins_encode %{
7547     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7548   %}
7549 
7550   ins_pipe(ialu_reg);
7551 %}
7552 
7553 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7554   match(Set dst (CountTrailingZerosI src));
7555 
7556   ins_cost(INSN_COST * 2);
7557   format %{ "rbitw  $dst, $src\n\t"
7558             "clzw   $dst, $dst" %}
7559   ins_encode %{
7560     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7561     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7562   %}
7563 
7564   ins_pipe(ialu_reg);
7565 %}
7566 
7567 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7568   match(Set dst (CountTrailingZerosL src));
7569 
7570   ins_cost(INSN_COST * 2);
7571   format %{ "rbit   $dst, $src\n\t"
7572             "clz    $dst, $dst" %}
7573   ins_encode %{
7574     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7575     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7576   %}
7577 
7578   ins_pipe(ialu_reg);
7579 %}
7580 
7581 //---------- Population Count Instructions -------------------------------------
7582 //
7583 
// Population count of a 32-bit int: move to a SIMD register, CNT the
// eight bytes, horizontally add with ADDV, move the result back.
// NOTE(review): the leading "movw $src, $src" writes back to $src to
// zero its upper 32 bits, but $src is not declared TEMP/USE_KILL in
// effect() — presumably the allocator tolerates the self-move; confirm.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7605 
7606 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
7607   predicate(UsePopCountInstruction);
7608   match(Set dst (PopCountI (LoadI mem)));
7609   effect(TEMP tmp);
7610   ins_cost(INSN_COST * 13);
7611 
7612   format %{ "ldrs   $tmp, $mem\n\t"
7613             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7614             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7615             "mov    $dst, $tmp\t# vector (1D)" %}
7616   ins_encode %{
7617     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7618     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
7619                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7620     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7621     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7622     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7623   %}
7624 
7625   ins_pipe(pipe_class_default);
7626 %}
7627 
7628 // Note: Long.bitCount(long) returns an int.
7629 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
7630   predicate(UsePopCountInstruction);
7631   match(Set dst (PopCountL src));
7632   effect(TEMP tmp);
7633   ins_cost(INSN_COST * 13);
7634 
7635   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
7636             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7637             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7638             "mov    $dst, $tmp\t# vector (1D)" %}
7639   ins_encode %{
7640     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
7641     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7642     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7643     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7644   %}
7645 
7646   ins_pipe(pipe_class_default);
7647 %}
7648 
7649 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
7650   predicate(UsePopCountInstruction);
7651   match(Set dst (PopCountL (LoadL mem)));
7652   effect(TEMP tmp);
7653   ins_cost(INSN_COST * 13);
7654 
7655   format %{ "ldrd   $tmp, $mem\n\t"
7656             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
7657             "addv   $tmp, $tmp\t# vector (8B)\n\t"
7658             "mov    $dst, $tmp\t# vector (1D)" %}
7659   ins_encode %{
7660     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
7661     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
7662                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
7663     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7664     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
7665     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
7666   %}
7667 
7668   ins_pipe(pipe_class_default);
7669 %}
7670 
7671 // ============================================================================
7672 // MemBar Instruction
7673 
7674 instruct load_fence() %{
7675   match(LoadFence);
7676   ins_cost(VOLATILE_REF_COST);
7677 
7678   format %{ "load_fence" %}
7679 
7680   ins_encode %{
7681     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7682   %}
7683   ins_pipe(pipe_serial);
7684 %}
7685 
7686 instruct unnecessary_membar_acquire() %{
7687   predicate(unnecessary_acquire(n));
7688   match(MemBarAcquire);
7689   ins_cost(0);
7690 
7691   format %{ "membar_acquire (elided)" %}
7692 
7693   ins_encode %{
7694     __ block_comment("membar_acquire (elided)");
7695   %}
7696 
7697   ins_pipe(pipe_class_empty);
7698 %}
7699 
7700 instruct membar_acquire() %{
7701   match(MemBarAcquire);
7702   ins_cost(VOLATILE_REF_COST);
7703 
7704   format %{ "membar_acquire\n\t"
7705             "dmb ish" %}
7706 
7707   ins_encode %{
7708     __ block_comment("membar_acquire");
7709     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7710   %}
7711 
7712   ins_pipe(pipe_serial);
7713 %}
7714 
7715 
7716 instruct membar_acquire_lock() %{
7717   match(MemBarAcquireLock);
7718   ins_cost(VOLATILE_REF_COST);
7719 
7720   format %{ "membar_acquire_lock (elided)" %}
7721 
7722   ins_encode %{
7723     __ block_comment("membar_acquire_lock (elided)");
7724   %}
7725 
7726   ins_pipe(pipe_serial);
7727 %}
7728 
7729 instruct store_fence() %{
7730   match(StoreFence);
7731   ins_cost(VOLATILE_REF_COST);
7732 
7733   format %{ "store_fence" %}
7734 
7735   ins_encode %{
7736     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7737   %}
7738   ins_pipe(pipe_serial);
7739 %}
7740 
7741 instruct unnecessary_membar_release() %{
7742   predicate(unnecessary_release(n));
7743   match(MemBarRelease);
7744   ins_cost(0);
7745 
7746   format %{ "membar_release (elided)" %}
7747 
7748   ins_encode %{
7749     __ block_comment("membar_release (elided)");
7750   %}
7751   ins_pipe(pipe_serial);
7752 %}
7753 
7754 instruct membar_release() %{
7755   match(MemBarRelease);
7756   ins_cost(VOLATILE_REF_COST);
7757 
7758   format %{ "membar_release\n\t"
7759             "dmb ish" %}
7760 
7761   ins_encode %{
7762     __ block_comment("membar_release");
7763     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7764   %}
7765   ins_pipe(pipe_serial);
7766 %}
7767 
7768 instruct membar_storestore() %{
7769   match(MemBarStoreStore);
7770   ins_cost(VOLATILE_REF_COST);
7771 
7772   format %{ "MEMBAR-store-store" %}
7773 
7774   ins_encode %{
7775     __ membar(Assembler::StoreStore);
7776   %}
7777   ins_pipe(pipe_serial);
7778 %}
7779 
7780 instruct membar_release_lock() %{
7781   match(MemBarReleaseLock);
7782   ins_cost(VOLATILE_REF_COST);
7783 
7784   format %{ "membar_release_lock (elided)" %}
7785 
7786   ins_encode %{
7787     __ block_comment("membar_release_lock (elided)");
7788   %}
7789 
7790   ins_pipe(pipe_serial);
7791 %}
7792 
7793 instruct unnecessary_membar_volatile() %{
7794   predicate(unnecessary_volatile(n));
7795   match(MemBarVolatile);
7796   ins_cost(0);
7797 
7798   format %{ "membar_volatile (elided)" %}
7799 
7800   ins_encode %{
7801     __ block_comment("membar_volatile (elided)");
7802   %}
7803 
7804   ins_pipe(pipe_serial);
7805 %}
7806 
7807 instruct membar_volatile() %{
7808   match(MemBarVolatile);
7809   ins_cost(VOLATILE_REF_COST*100);
7810 
7811   format %{ "membar_volatile\n\t"
7812              "dmb ish"%}
7813 
7814   ins_encode %{
7815     __ block_comment("membar_volatile");
7816     __ membar(Assembler::StoreLoad);
7817   %}
7818 
7819   ins_pipe(pipe_serial);
7820 %}
7821 
7822 // ============================================================================
7823 // Cast/Convert Instructions
7824 
7825 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7826   match(Set dst (CastX2P src));
7827 
7828   ins_cost(INSN_COST);
7829   format %{ "mov $dst, $src\t# long -> ptr" %}
7830 
7831   ins_encode %{
7832     if ($dst$$reg != $src$$reg) {
7833       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7834     }
7835   %}
7836 
7837   ins_pipe(ialu_reg);
7838 %}
7839 
7840 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7841   match(Set dst (CastP2X src));
7842 
7843   ins_cost(INSN_COST);
7844   format %{ "mov $dst, $src\t# ptr -> long" %}
7845 
7846   ins_encode %{
7847     if ($dst$$reg != $src$$reg) {
7848       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7849     }
7850   %}
7851 
7852   ins_pipe(ialu_reg);
7853 %}
7854 
7855 // Convert oop into int for vectors alignment masking
7856 instruct convP2I(iRegINoSp dst, iRegP src) %{
7857   match(Set dst (ConvL2I (CastP2X src)));
7858 
7859   ins_cost(INSN_COST);
7860   format %{ "movw $dst, $src\t# ptr -> int" %}
7861   ins_encode %{
7862     __ movw($dst$$Register, $src$$Register);
7863   %}
7864 
7865   ins_pipe(ialu_reg);
7866 %}
7867 
7868 // Convert compressed oop into int for vectors alignment masking
7869 // in case of 32bit oops (heap < 4Gb).
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// Fix: format string said "mov dst, $src" — missing the '$' on dst
// and the wrong mnemonic; the encoding emits a 32-bit movw.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7883 
7884 
7885 // Convert oop pointer into compressed form
7886 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7887   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
7888   match(Set dst (EncodeP src));
7889   effect(KILL cr);
7890   ins_cost(INSN_COST * 3);
7891   format %{ "encode_heap_oop $dst, $src" %}
7892   ins_encode %{
7893     Register s = $src$$Register;
7894     Register d = $dst$$Register;
7895     __ encode_heap_oop(d, s);
7896   %}
7897   ins_pipe(ialu_reg);
7898 %}
7899 
7900 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7901   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
7902   match(Set dst (EncodeP src));
7903   ins_cost(INSN_COST * 3);
7904   format %{ "encode_heap_oop_not_null $dst, $src" %}
7905   ins_encode %{
7906     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
7907   %}
7908   ins_pipe(ialu_reg);
7909 %}
7910 
7911 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7912   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
7913             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
7914   match(Set dst (DecodeN src));
7915   ins_cost(INSN_COST * 3);
7916   format %{ "decode_heap_oop $dst, $src" %}
7917   ins_encode %{
7918     Register s = $src$$Register;
7919     Register d = $dst$$Register;
7920     __ decode_heap_oop(d, s);
7921   %}
7922   ins_pipe(ialu_reg);
7923 %}
7924 
7925 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7926   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
7927             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
7928   match(Set dst (DecodeN src));
7929   ins_cost(INSN_COST * 3);
7930   format %{ "decode_heap_oop_not_null $dst, $src" %}
7931   ins_encode %{
7932     Register s = $src$$Register;
7933     Register d = $dst$$Register;
7934     __ decode_heap_oop_not_null(d, s);
7935   %}
7936   ins_pipe(ialu_reg);
7937 %}
7938 
7939 // n.b. AArch64 implementations of encode_klass_not_null and
7940 // decode_klass_not_null do not modify the flags register so, unlike
7941 // Intel, we don't kill CR as a side effect here
7942 
7943 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
7944   match(Set dst (EncodePKlass src));
7945 
7946   ins_cost(INSN_COST * 3);
7947   format %{ "encode_klass_not_null $dst,$src" %}
7948 
7949   ins_encode %{
7950     Register src_reg = as_Register($src$$reg);
7951     Register dst_reg = as_Register($dst$$reg);
7952     __ encode_klass_not_null(dst_reg, src_reg);
7953   %}
7954 
7955    ins_pipe(ialu_reg);
7956 %}
7957 
7958 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
7959   match(Set dst (DecodeNKlass src));
7960 
7961   ins_cost(INSN_COST * 3);
7962   format %{ "decode_klass_not_null $dst,$src" %}
7963 
7964   ins_encode %{
7965     Register src_reg = as_Register($src$$reg);
7966     Register dst_reg = as_Register($dst$$reg);
7967     if (dst_reg != src_reg) {
7968       __ decode_klass_not_null(dst_reg, src_reg);
7969     } else {
7970       __ decode_klass_not_null(dst_reg);
7971     }
7972   %}
7973 
7974    ins_pipe(ialu_reg);
7975 %}
7976 
7977 instruct checkCastPP(iRegPNoSp dst)
7978 %{
7979   match(Set dst (CheckCastPP dst));
7980 
7981   size(0);
7982   format %{ "# checkcastPP of $dst" %}
7983   ins_encode(/* empty encoding */);
7984   ins_pipe(pipe_class_empty);
7985 %}
7986 
// CastPP is a compile-time-only pointer type adjustment; no code is
// emitted (size(0)).
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
7996 
// CastII is a compile-time-only int type adjustment; no code is
// emitted (size(0), ins_cost(0)).
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8007 
8008 // ============================================================================
8009 // Atomic operation instructions
8010 //
8011 // Intel and SPARC both implement Ideal Node LoadPLocked and
8012 // Store{PIL}Conditional instructions using a normal load for the
8013 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8014 //
8015 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8016 // pair to lock object allocations from Eden space when not using
8017 // TLABs.
8018 //
8019 // There does not appear to be a Load{IL}Locked Ideal Node and the
8020 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8021 // and to use StoreIConditional only for 32-bit and StoreLConditional
8022 // only for 64-bit.
8023 //
// We implement LoadPLocked and StorePLocked instructions using,
// respectively, the AArch64 hw load-exclusive and store-conditional
// instructions, whereas we must implement each of
// Store{IL}Conditional using a CAS, which employs a pair of
// instructions comprising a load-exclusive followed by a
// store-conditional.
8030 
8031 
8032 // Locked-load (linked load) of the current heap-top
8033 // used when updating the eden heap top
8034 // implemented using ldaxr on AArch64
8035 
// Acquiring load-exclusive (ldaxr) of a pointer; pairs with
// storePConditional for lock-free heap-top updates.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8048 
8049 // Conditional-store of the updated heap-top.
8050 // Used during allocation of the shared heap.
8051 // Sets flag (EQ) on success.
8052 // implemented using stlxr on AArch64.
8053 
// Store-conditional-release (stlxr) of newval; per the format, the
// stlxr status result is compared against zero so flags read EQ on a
// successful store. oldval is implicit in the preceding loadPLocked.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8073 
8074 
8075 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8076 // when attempting to rebias a lock towards the current thread.  We
8077 // must use the acquire form of cmpxchg in order to guarantee acquire
8078 // semantics in this case.
// Long conditional store via acquire-form CAS (see comment above:
// acquire is required for the lock-rebias path). Flags read EQ on a
// successful write; rscratch1 is used as scratch per the format.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8094 
8095 // storeIConditional also has acquire semantics, for no better reason
8096 // than matching storeLConditional.  At the time of writing this
8097 // comment storeIConditional was not used anywhere by AArch64.
// Int conditional store via acquire-form word CAS; acquire is kept
// only for symmetry with storeLConditional (see comment above).
// Flags read EQ on a successful write.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8113 
8114 // standard CompareAndSwapX when we are using barriers
8115 // these have higher priority than the rules selected by a predicate
8116 
8117 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8118 // can't match them
8119 
// Strong byte CAS producing a success flag: $res <- 1 if the swap
// happened, else 0 (cset on EQ). Clobbers flags (KILL cr).
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8137 
// Strong short (halfword) CAS producing a success flag in $res.
// Clobbers flags (KILL cr).
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8155 
// Strong int (word) CAS producing a success flag in $res.
// Clobbers flags (KILL cr).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8173 
// Strong long (xword) CAS producing a success flag in $res.
// Clobbers flags (KILL cr).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8191 
// Strong pointer CAS producing a success flag in $res.
// Clobbers flags (KILL cr).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8209 
// Strong narrow-oop (word) CAS producing a success flag in $res.
// Clobbers flags (KILL cr).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8227 
8228 // alternative CompareAndSwapX when we are eliding barriers
8229 
// Acquire form of compareAndSwapB, selected by
// needs_acquiring_load_exclusive(n); the lower ins_cost makes it
// preferred when the predicate holds.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8248 
// Acquire form of compareAndSwapS, selected by
// needs_acquiring_load_exclusive(n).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8267 
// Acquire form of compareAndSwapI, selected by
// needs_acquiring_load_exclusive(n).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8286 
// Acquire form of compareAndSwapL, selected by
// needs_acquiring_load_exclusive(n).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8305 
// Acquire form of compareAndSwapP, selected by
// needs_acquiring_load_exclusive(n).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8324 
// Acquire form of compareAndSwapN, selected by
// needs_acquiring_load_exclusive(n).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8343 
8344 
8345 // ---------------------------------------------------------------------
8346 
8347 
8348 // BEGIN This section of the file is automatically generated. Do not edit --------------
8349 
8350 // Sundry CAS operations.  Note that release is always true,
8351 // regardless of the memory ordering of the CAS.  This is because we
8352 // need the volatile case to be sequentially consistent but there is
8353 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
8354 // can't check the type of memory ordering here, so we always emit a
8355 // STLXR.
8356 
8357 // This section is generated from aarch64_ad_cas.m4
8358 
8359 
8360 
// Strong (non-weak: /*weak*/ false) byte CAS returning the value found
// in memory, sign-extended to 32 bits to match a Java byte.
// Fix: the format string mislabelled this strong CAS as "weak"
// (copy/paste from the weak variants). Sync aarch64_ad_cas.m4.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8376 
// Strong (non-weak) short CAS returning the value found in memory,
// sign-extended to 32 bits.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8392 
// Strong (non-weak) int CAS returning the value found in memory.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8407 
// Strong (non-weak) long CAS returning the value found in memory.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8422 
// Strong (non-weak) narrow-oop CAS returning the value found in memory.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8437 
// Strong (non-weak) pointer CAS returning the value found in memory.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8452 
// Acquire form of compareAndExchangeB (predicate-selected). Strong CAS;
// result sign-extended to match a Java byte.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8469 
// Acquire form of compareAndExchangeS (predicate-selected). Strong CAS;
// result sign-extended to match a Java short.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8486 
8487 
// Acquire form of compareAndExchangeI (predicate-selected). Strong CAS.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8503 
// Acquire form of compareAndExchangeL (predicate-selected). Strong CAS.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8519 
8520 
// Acquire form of compareAndExchangeN (predicate-selected). Strong CAS.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8536 
// Acquire form of compareAndExchangeP (predicate-selected). Strong CAS.
// Fix: format string mislabelled this strong CAS as "weak".
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8552 
// Weak byte CAS (/*weak*/ true: may fail spuriously); $res is the
// success flag (csetw on EQ), not the old value.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8569 
// Weak short CAS; $res is the success flag (csetw on EQ).
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8586 
// Weak int CAS; $res is the success flag (csetw on EQ).
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8603 
// Weak long CAS; $res (an int register: the flag, not the value) is
// the success flag (csetw on EQ).
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8620 
// Weak narrow-oop CAS; $res is the success flag (csetw on EQ).
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8637 
// Weak pointer CAS; $res is the success flag (csetw on EQ).
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8654 
// Acquire form of weakCompareAndSwapB (predicate-selected).
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8672 
// Acquire form of weakCompareAndSwapS (predicate-selected).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8690 
// Acquire form of weakCompareAndSwapI (predicate-selected).
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8708 
// Acquire form of weakCompareAndSwapL (predicate-selected).
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8726 
// Acquire form of weakCompareAndSwapN (predicate-selected).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8744 
// Acquire form of weakCompareAndSwapP (predicate-selected).
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8762 
8763 // END This section of the file is automatically generated. Do not edit --------------
8764 // ---------------------------------------------------------------------
8765 
// Atomic int exchange: stores $newv and returns the previous value in
// $prev; addressed via the base register of $mem.
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8775 
// Atomic long exchange: returns the previous value in $prev.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8785 
// Atomic narrow-oop (word) exchange: returns the previous value in $prev.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8795 
// Atomic pointer exchange: returns the previous value in $prev.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8805 
// Acquiring form of GetAndSetI, selected when the node needs an acquiring
// load-exclusive (e.g. volatile access); uses atomic_xchgalw. Cheaper cost
// so it is preferred over get_and_setI when the predicate holds.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8816 
// Acquiring form of GetAndSetL (64-bit); uses atomic_xchgal.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8827 
// Acquiring form of GetAndSetN (compressed oop); uses atomic_xchgalw.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8838 
// Acquiring form of GetAndSetP (pointer); uses atomic_xchgal.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8849 
8850 
// GetAndAddL: atomic 64-bit add of $incr to [$mem]; the loadstore result is
// bound to $newval. The +1 cost tilts selection toward the no_res variant
// when the result is unused.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8860 
// GetAndAddL whose result is unused: no result register is written (noreg).
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8871 
// GetAndAddL with an add/sub-encodable immediate increment (immLAddSub).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8881 
// Immediate-increment GetAndAddL with unused result.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8892 
// GetAndAddI: atomic 32-bit add of $incr to [$mem]; result in $newval.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8902 
// GetAndAddI whose result is unused (noreg result).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8913 
// GetAndAddI with an add/sub-encodable immediate increment (immIAddSub).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8923 
// Immediate-increment GetAndAddI with unused result.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8934 
// Acquiring form of GetAndAddL; uses atomic_addal.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8945 
// Acquiring GetAndAddL with unused result.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8956 
// Acquiring GetAndAddL with immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8967 
// Acquiring, immediate-increment GetAndAddL with unused result.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8978 
// Acquiring form of GetAndAddI; uses atomic_addalw.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8989 
// Acquiring GetAndAddI with unused result.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9000 
// Acquiring GetAndAddI with immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9011 
// Acquiring, immediate-increment GetAndAddI with unused result.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9022 
9023 // Manifest a CmpL result in an integer register.
9024 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    // cmp sets flags; csetw makes dst 0 (EQ) or 1 (NE); cnegw negates
    // dst when LT, yielding the -1/0/1 three-way compare result.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9045 
// Manifest a CmpL result (-1/0/1) in an integer register when the second
// operand is an add/sub-encodable immediate.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A subs with a negative immediate cannot be encoded, so compare by
    // adding the magnitude instead. NOTE(review): assumes immLAddSub only
    // admits constants whose negation is representable (no INT_MIN-style
    // overflow of -con) — confirm against the operand definition.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // 0 if EQ, 1 if NE, negated to -1 when LT: the -1/0/1 CmpL3 result.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9070 
9071 // ============================================================================
9072 // Conditional Move Instructions
9073 
9074 // n.b. we have identical rules for both a signed compare op (cmpOp)
9075 // and an unsigned compare op (cmpOpU). it would be nice if we could
9076 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
9078 // opclass does not live up to the COND_INTER interface of its
9079 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9081 // which throws a ShouldNotHappen. So, we have to provide two flavours
9082 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9083 
// CMoveI, signed condition: dst = cond ? $src2 : $src1 (cselw picks its
// first source register when the condition holds).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9099 
// CMoveI, unsigned condition (cmpOpU/rFlagsRegU flavour of the rule above).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9115 
9116 // special cases where one arg is zero
9117 
9118 // n.b. this is selected in preference to the rule above because it
9119 // avoids loading constant 0 into a source register
9120 
9121 // TODO
9122 // we ought only to be able to cull one of these variants as the ideal
9123 // transforms ought always to order the zero consistently (to left/right?)
9124 
// CMoveI with a zero first arm: dst = cond ? $src : 0, using zr instead of
// materializing the constant.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9140 
// Unsigned-condition variant of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9156 
// CMoveI with a zero second arm: dst = cond ? 0 : $src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9172 
// Unsigned-condition variant of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9188 
9189 // special case for creating a boolean 0 or 1
9190 
9191 // n.b. this is selected in preference to the rule above because it
9192 // avoids loading constants 0 and 1 into a source register
9193 
// Boolean materialization: CMoveI selecting between constants 1 and 0.
// csincw zr, zr yields 0 when the condition holds and 1 otherwise
// (equivalent to cset with the negated condition, per the note below).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9212 
// Unsigned-condition variant of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9231 
// CMoveL, signed condition: 64-bit csel, sources swapped as in cmovI_reg_reg.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9247 
// CMoveL, unsigned condition.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9263 
9264 // special cases where one arg is zero
9265 
// CMoveL with a zero second arm: dst = cond ? 0 : $src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9281 
// Unsigned-condition variant of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9297 
// CMoveL with a zero first arm: dst = cond ? $src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9313 
// Unsigned-condition variant of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9329 
// CMoveP, signed condition: 64-bit pointer conditional select.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9345 
// CMoveP, unsigned condition.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9361 
9362 // special cases where one arg is zero
9363 
// CMoveP with a null second arm: dst = cond ? null : $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9379 
// Unsigned-condition variant of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9395 
// CMoveP with a null first arm: dst = cond ? $src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9411 
// Unsigned-condition variant of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9427 
// CMoveN, signed condition: 32-bit cselw on compressed oops.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9443 
// CMoveN, unsigned condition (cmpOpU/rFlagsRegU flavour of cmovN_reg_reg).
// Fix: the format annotation previously said "signed" although this is the
// unsigned rule — brought in line with every other cmpOpU rule in the file.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9459 
9460 // special cases where one arg is zero
9461 
// CMoveN with a narrow-null second arm: dst = cond ? 0 : $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9477 
// Unsigned-condition variant of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9493 
// CMoveN with a narrow-null first arm: dst = cond ? $src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9509 
// Unsigned-condition variant of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9525 
// CMoveF, signed condition: single-precision fcsels with sources swapped
// (dst = cond ? $src2 : $src1), mirroring the integer cmov rules.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9543 
// CMoveF, unsigned condition.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9561 
// CMoveD, signed condition: double-precision fcseld with sources swapped.
// Fix: the format annotation previously said "cmove float" although this is
// the double (CMoveD/fcseld) rule.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9579 
// CMoveD, unsigned condition.
// Fix: the format annotation previously said "cmove float" although this is
// the double (CMoveD/fcseld) rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9597 
9598 // ============================================================================
9599 // Arithmetic Instructions
9600 //
9601 
9602 // Integer Addition
9603 
9604 // TODO
9605 // these currently employ operations which do not set CR and hence are
9606 // not flagged as killing CR but we would like to isolate the cases
9607 // where we want to set flags from those where we don't. need to work
9608 // out how to do that.
9609 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9624 
// 32-bit integer add with add/sub-encodable immediate; shares the
// aarch64_enc_addsubw_imm encoder with sub, opcode 0x0 selecting add.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9638 
// As addI_reg_imm, but folds a ConvL2I of the register operand into the
// 32-bit addw (the narrowing is implicit in the w-form instruction).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9652 
9653 // Pointer Addition
// Pointer add, register + long offset register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9668 
// Pointer add folding a ConvI2L of the offset: add with sxtw extension.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9683 
// Pointer add folding a left-shifted long index: lea with an lsl-scaled
// register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9698 
// Pointer add folding ConvI2L + shift of the index: lea with a
// sign-extended (sxtw), scaled register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9713 
// (long)(int)$src << $scale in one sbfiz (sign-extend + shift combined;
// width capped at 32 bits of significant input).
// NOTE(review): the rFlagsReg cr parameter has no effect() clause and is
// unused by the encoding — presumably intentional; confirm.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9728 
9729 // Pointer Immediate Addition
9730 // n.b. this needs to be more expensive than using an indirect memory
9731 // operand
// Pointer plus add/sub-encodable immediate: dst = src1 + src2.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9745 
9746 // Long Addition
// 64-bit register-register addition: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9762 
// Long Immediate Addition (no constant pool entries required).
// 64-bit addition of an add/sub-encodable immediate: dst = src1 + src2.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9777 
9778 // Integer Subtraction
// 32-bit register-register subtraction: dst = src1 - src2.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9793 
9794 // Immediate Subtraction
// 32-bit subtraction of an add/sub-encodable immediate: dst = src1 - src2.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9808 
9809 // Long Subtraction
// 64-bit register-register subtraction: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9825 
// Long Immediate Subtraction (no constant pool entries required).
// 64-bit subtraction of an add/sub-encodable immediate: dst = src1 - src2.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed format string: was "sub$dst" -- missing separator after the
  // mnemonic; now matches the spacing used by subL_reg_reg.
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9840 
9841 // Integer Negation (special case for sub)
9842 
// 32-bit negation: dst = 0 - src, matched as SubI with a zero left operand.
// NOTE(review): the cr operand is not referenced by the encoding -- presumably
// kept for allocator bookkeeping; confirm before removing.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9856 
9857 // Long Negation
9858 
// 64-bit negation: dst = 0 - src, matched as SubL with a zero left operand.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9872 
9873 // Integer Multiply
9874 
// 32-bit multiply: dst = src1 * src2.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9889 
// Widening signed multiply: dst(64) = (long)src1 * (long)src2, where both
// operands are sign-extended ints -- folds the two ConvI2L nodes into smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9904 
9905 // Long Multiply
9906 
// 64-bit multiply: dst = src1 * src2 (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9921 
// High half of a 64x64 signed multiply: dst = high 64 bits of src1 * src2.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed format string: dropped the stray trailing comma after $src2.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9937 
9938 // Combined Integer Multiply & Add/Sub
9939 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: the encoding emits the 32-bit maddw, not madd.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9955 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: the encoding emits the 32-bit msubw, not msub.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9971 
9972 // Combined Integer Multiply & Neg
9973 
// Fused 32-bit multiply-negate: dst = -(src1 * src2). Two match rules cover
// the negation appearing on either multiply operand.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Fixed format string: the encoding emits the 32-bit mnegw, not mneg.
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9989 
9990 // Combined Long Multiply & Add/Sub
9991 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10007 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10023 
10024 // Combined Long Multiply & Neg
10025 
// Fused 64-bit multiply-negate: dst = -(src1 * src2). Two match rules cover
// the negation appearing on either multiply operand.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10041 
10042 // Integer Divide
10043 
// 32-bit signed division: dst = src1 / src2, via the shared divw enc class.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10053 
// Sign-bit extraction: (src1 >> 31) >>> 31 collapses to a single
// logical shift right by 31, yielding 0 or 1.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10063 
// Round-toward-zero helper for division by 2: dst = src + (src >>> 31),
// i.e. adds the sign bit, emitted as one addw with a shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10077 
10078 // Long Divide
10079 
// 64-bit signed division: dst = src1 / src2, via the shared div enc class.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10089 
// 64-bit sign-bit extraction: (src1 >> 63) >>> 63 collapses to a single
// logical shift right by 63, yielding 0 or 1.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10099 
// 64-bit round-toward-zero helper for division by 2: dst = src + (src >>> 63),
// i.e. adds the sign bit, emitted as one add with a shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format string: show the LSR modifier, consistent with div2Round.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10113 
10114 // Integer Remainder
10115 
// 32-bit signed remainder: dst = src1 % src2, computed as
// src1 - (src1 / src2) * src2 via sdivw + msubw (see aarch64_enc_modw).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format string: was "msubw($dst" -- stray '(' after the mnemonic.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10126 
10127 // Long Remainder
10128 
// 64-bit signed remainder: dst = src1 % src2, computed as
// src1 - (src1 / src2) * src2 via sdiv + msub (see aarch64_enc_mod).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format string: was "msub($dst" -- stray '(' after the mnemonic;
  // also use "\n\t" between lines, consistent with modI.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10139 
10140 // Integer Shifts
10141 
10142 // Shift Left Register
// 32-bit variable shift left: dst = src1 << (src2 & 0x1f).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10157 
10158 // Shift Left Immediate
// 32-bit shift left by immediate; shift count is masked to 5 bits, matching
// Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10173 
10174 // Shift Right Logical Register
// 32-bit variable logical shift right: dst = src1 >>> (src2 & 0x1f).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10189 
10190 // Shift Right Logical Immediate
// 32-bit logical shift right by immediate; count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10205 
10206 // Shift Right Arithmetic Register
// 32-bit variable arithmetic shift right: dst = src1 >> (src2 & 0x1f).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10221 
10222 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by immediate; count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10237 
10238 // Combined Int Mask and Right Shift (using UBFM)
10239 // TODO
10240 
10241 // Long Shifts
10242 
10243 // Shift Left Register
// 64-bit variable shift left: dst = src1 << (src2 & 0x3f).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10258 
10259 // Shift Left Immediate
// 64-bit shift left by immediate; count masked to 6 bits, matching Java
// long-shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10274 
10275 // Shift Right Logical Register
// 64-bit variable logical shift right: dst = src1 >>> (src2 & 0x3f).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10290 
10291 // Shift Right Logical Immediate
// 64-bit logical shift right by immediate; count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10306 
10307 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X);
// special-cased so card-table address computations match directly.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10322 
10323 // Shift Right Arithmetic Register
// 64-bit variable arithmetic shift right: dst = src1 >> (src2 & 0x3f).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10338 
10339 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by immediate; count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10354 
10355 // BEGIN This section of the file is automatically generated. Do not edit --------------
10356 
// Generated pattern: dst = ~src1 (XorL with -1), emitted as eon with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Generated pattern: dst = ~src1 (32-bit XorI with -1), via eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10389 
// Generated pattern: dst = src1 & ~src2, emitted as bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10406 
// Generated pattern: dst = src1 & ~src2 (64-bit), emitted as bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10423 
// Generated pattern: dst = src1 | ~src2, emitted as ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10440 
// Generated pattern: dst = src1 | ~src2 (64-bit), emitted as orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10457 
// Generated pattern: dst = ~(src1 ^ src2), emitted as eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10474 
// Generated pattern: dst = ~(src1 ^ src2) (64-bit), emitted as eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10491 
// Generated pattern: dst = src1 & ~(src2 >>> src3), via bicw with LSR operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10509 
// Generated pattern: dst = src1 & ~(src2 >>> src3) (64-bit), via bic + LSR.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10527 
// Generated pattern: dst = src1 & ~(src2 >> src3), via bicw with ASR operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10545 
// Generated pattern: dst = src1 & ~(src2 >> src3) (64-bit), via bic + ASR.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10563 
// Generated pattern: dst = src1 & ~(src2 << src3), via bicw with LSL operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10581 
// Generated pattern: dst = src1 & ~(src2 << src3) (64-bit), via bic + LSL.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10599 
// Generated pattern: dst = ~(src1 ^ (src2 >>> src3)), via eonw with LSR operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10617 
// Generated pattern: dst = ~(src1 ^ (src2 >>> src3)) (64-bit), via eon + LSR.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10635 
// Generated pattern: dst = ~(src1 ^ (src2 >> src3)), via eonw with ASR operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10653 
// Generated pattern: dst = ~(src1 ^ (src2 >> src3)) (64-bit), via eon + ASR.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10671 
// Generated pattern: dst = ~(src1 ^ (src2 << src3)), via eonw with LSL operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10689 
// Generated pattern: dst = ~(src1 ^ (src2 << src3)) (64-bit), via eon + LSL.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10707 
// Generated pattern: dst = src1 | ~(src2 >>> src3), via ornw with LSR operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10725 
// Generated pattern: dst = src1 | ~(src2 >>> src3) (64-bit), via orn + LSR.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10743 
10744 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10745                          iRegIorL2I src1, iRegIorL2I src2,
10746                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10747   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10748   ins_cost(1.9 * INSN_COST);
10749   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10750 
10751   ins_encode %{
10752     __ ornw(as_Register($dst$$reg),
10753               as_Register($src1$$reg),
10754               as_Register($src2$$reg),
10755               Assembler::ASR,
10756               $src3$$constant & 0x1f);
10757   %}
10758 
10759   ins_pipe(ialu_reg_reg_shift);
10760 %}
10761 
10762 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10763                          iRegL src1, iRegL src2,
10764                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10765   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10766   ins_cost(1.9 * INSN_COST);
10767   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10768 
10769   ins_encode %{
10770     __ orn(as_Register($dst$$reg),
10771               as_Register($src1$$reg),
10772               as_Register($src2$$reg),
10773               Assembler::ASR,
10774               $src3$$constant & 0x3f);
10775   %}
10776 
10777   ins_pipe(ialu_reg_reg_shift);
10778 %}
10779 
10780 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10781                          iRegIorL2I src1, iRegIorL2I src2,
10782                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10783   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10784   ins_cost(1.9 * INSN_COST);
10785   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10786 
10787   ins_encode %{
10788     __ ornw(as_Register($dst$$reg),
10789               as_Register($src1$$reg),
10790               as_Register($src2$$reg),
10791               Assembler::LSL,
10792               $src3$$constant & 0x1f);
10793   %}
10794 
10795   ins_pipe(ialu_reg_reg_shift);
10796 %}
10797 
10798 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10799                          iRegL src1, iRegL src2,
10800                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10801   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10802   ins_cost(1.9 * INSN_COST);
10803   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10804 
10805   ins_encode %{
10806     __ orn(as_Register($dst$$reg),
10807               as_Register($src1$$reg),
10808               as_Register($src2$$reg),
10809               Assembler::LSL,
10810               $src3$$constant & 0x3f);
10811   %}
10812 
10813   ins_pipe(ialu_reg_reg_shift);
10814 %}
10815 
// AND with a shift-by-immediate second operand folded into the
// shifted-register form of the instruction.  Shift counts are masked to
// 0x1f (32-bit) / 0x3f (64-bit) to match the hardware encoding.
// The 64-bit assembler entry point is named "andr" (not "and") —
// presumably because "and" is reserved in C++; confirm against the
// Assembler class.

// dst = src1 & (src2 >>> src3): ANDW with LSR operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3), 64-bit: AND with LSR operand.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3): ANDW with ASR operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3), 64-bit: AND with ASR operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3): ANDW with LSL operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3), 64-bit: AND with LSL operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10929 
// XOR with a shift-by-immediate second operand folded into the
// shifted-register form (EOR/EORW).  Shift counts masked to the
// register width below.

// dst = src1 ^ (src2 >>> src3): EORW with LSR operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3), 64-bit: EOR with LSR operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3): EORW with ASR operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3), 64-bit: EOR with ASR operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3): EORW with LSL operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3), 64-bit: EOR with LSL operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11043 
// OR with a shift-by-immediate second operand folded into the
// shifted-register form (ORR/ORRW).  Shift counts masked to the
// register width below.

// dst = src1 | (src2 >>> src3): ORRW with LSR operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3), 64-bit: ORR with LSR operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3): ORRW with ASR operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3), 64-bit: ORR with ASR operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3): ORRW with LSL operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3), 64-bit: ORR with LSL operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11157 
// ADD with a shift-by-immediate second operand folded into the
// shifted-register form (ADD/ADDW).  Shift counts masked to the
// register width below.

// dst = src1 + (src2 >>> src3): ADDW with LSR operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3), 64-bit: ADD with LSR operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3): ADDW with ASR operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3), 64-bit: ADD with ASR operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3): ADDW with LSL operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3), 64-bit: ADD with LSL operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11271 
// SUB with a shift-by-immediate second operand folded into the
// shifted-register form (SUB/SUBW).  Shift counts masked to the
// register width below.

// dst = src1 - (src2 >>> src3): SUBW with LSR operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3), 64-bit: SUB with LSR operand.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3): SUBW with ASR operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3), 64-bit: SUB with ASR operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3): SUBW with LSL operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 64-bit: SUB with LSL operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11385 
11386 
11387 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// A left shift followed by an arithmetic right shift of a 64-bit value
// collapses to one SBFM.  The two immediates passed to sbfm are
// r = (rshift - lshift) & 63 and s = 63 - lshift — presumably the
// immr/immss fields of SBFM; confirm against the Assembler::sbfm API.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of the rule above: (src << l) >> r becomes one SBFMW,
// with the shift counts reduced mod 32 instead of 64.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: a left shift followed by a LOGICAL
// right shift collapses to one UBFM (zero-extends instead of
// sign-extending).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (src << l) >>> r becomes one UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// dst = (src >>> rshift) & mask collapses to one UBFXW.  immI_bitmask
// guarantees mask+1 is a power of two, so the field width is
// exact_log2(mask+1).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant: (src >>> rshift) & mask becomes one UBFX, with the
// width derived from the immL_bitmask operand the same way.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The ConvI2L wrapper is absorbed: ubfx zero-fills the upper bits, so
// the extracted field is already a valid long.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11529 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// dst = (src & mask) << lshift collapses to one UBFIZW.  The predicate
// requires lshift <= 31 and field-width + lshift <= 32 so the inserted
// field fits in the 32-bit destination.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant: lshift <= 63 and field-width + lshift <= 64.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// (the zero-fill performed by ubfiz subsumes the i2l zero/sign handling
// for the masked, non-negative field).
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11587 
// Rotations

// dst = (src1 << lshift) | (src2 >>> rshift) where lshift + rshift is a
// multiple of 64 (enforced by the predicate), i.e. the two shifts form a
// 64-bit EXTR (double-register extract; a rotate when src1 == src2).
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11604 
// dst = (src1 << lshift) | (src2 >>> rshift) where lshift + rshift is a
// multiple of 32 (enforced by the predicate), i.e. the two shifts form a
// 32-bit EXTR (a rotate when src1 == src2).
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the w-form mnemonic so the debug listing matches the emitted
  // extrw (the 64-bit rule above prints "extr" for extr).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11619 
// Same fusion as extrOrL but with AddL combining the two shifted halves:
// when the shifted bit ranges are disjoint (lshift + rshift ≡ 0 mod 64),
// add and or produce the same bits, so EXTR applies here too.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11634 
11635 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11636 %{
11637   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11638   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11639 
11640   ins_cost(INSN_COST);
11641   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11642 
11643   ins_encode %{
11644     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11645             $rshift$$constant & 31);
11646   %}
11647   ins_pipe(ialu_reg_reg_extr);
11648 %}
11649 
11650 
11651 // rol expander
11652 
// Expander used by the rolL_rReg_Var_* match rules below (it has an
// effect() clause but no match rule, so it is never matched directly).
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // AArch64 has no rotate-left instruction: rotate left by 'shift' is
    // emitted as rotate right by the negated amount (RORV only uses the
    // low bits of the shift register).  Clobbers rscratch1.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11666 
11667 // rol expander
11668 
// 32-bit rotate-left expander; see rolL_rReg above.  Used by the
// rolI_rReg_Var_* match rules below.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // Rotate left == rotate right by the negated amount.  Clobbers rscratch1.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11682 
// Matches the canonical Java rotate-left idiom for longs:
// (x << s) | (x >>> (64 - s)); delegates to the rolL_rReg expander.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same rotation with (0 - s) instead of (64 - s); equivalent because
// shift amounts are taken modulo 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left idiom: (x << s) | (x >>> (32 - s)).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-left with (0 - s); equivalent modulo 32.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11718 
11719 // ror expander
11720 
// Rotate-right expander: maps directly onto RORV, so it is a single
// instruction (cheaper than the rol expanders, which need a negate first).
// Used by the rorL_rReg_Var_* match rules below; never matched directly.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11733 
11734 // ror expander
11735 
// 32-bit rotate-right expander (RORVW); see rorL_rReg above.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11748 
// Matches the canonical Java rotate-right idiom for longs:
// (x >>> s) | (x << (64 - s)); delegates to the rorL_rReg expander.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same rotation with (0 - s) instead of (64 - s); equivalent modulo 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right idiom: (x >>> s) | (x << (32 - s)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// 32-bit rotate-right with (0 - s); equivalent modulo 32.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11784 
11785 // Add/subtract (extended)
11786 
// long + (long)int: fold the ConvI2L into the sxtw extend qualifier of a
// single ADD (extended register form), avoiding a separate sign-extend.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11799 
// long - (long)int: fold the ConvI2L into the sxtw extend qualifier of a
// single SUB (extended register form); see AddExtI above.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11812 
11813 
// The (x << k) >> k shift pairs below are C2's canonical form of
// sign-extension (RShift) or zero-extension (URShift) from (width - k)
// bits.  Each pattern folds the whole extension into the extend
// qualifier of a single add (extended register form).

// (src2 << 16) >> 16 == sign-extend from 16 bits -> add ... sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >> 24 == sign-extend from 8 bits -> add ... sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >>> 24 == zero-extend from 8 bits -> add ... uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 48) >> 48 == sign-extend from 16 bits -> sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 32) >> 32 == sign-extend from 32 bits -> sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 56) >> 56 == sign-extend from 8 bits -> sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long form: (src2 << 56) >>> 56 == zero-extend from 8 bits -> uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11904 
11905 
// Masking with 0xFF / 0xFFFF / 0xFFFFFFFF is zero-extension from
// 8/16/32 bits.  These patterns fold the AndI/AndL into the
// uxtb/uxth/uxtw extend qualifier of a single add/sub
// (extended register form).

// src1 + (src2 & 0xFF) -> addw ... uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + (src2 & 0xFFFF) -> addw ... uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: src1 + (src2 & 0xFFL) -> add ... uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: src1 + (src2 & 0xFFFFL) -> add ... uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: src1 + (src2 & 0xFFFFFFFFL) -> add ... uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 - (src2 & 0xFF) -> subw ... uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 - (src2 & 0xFFFF) -> subw ... uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: src1 - (src2 & 0xFFL) -> sub ... uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: src1 - (src2 & 0xFFFFL) -> sub ... uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long: src1 - (src2 & 0xFFFFFFFFL) -> sub ... uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12035 
12036 
// Same shift-pair extension patterns as above, but with an additional
// left shift (lshift2, an immIExt) applied to the extended value.  The
// AArch64 extended-register add/sub form takes both an extend qualifier
// and a small shift amount, so the whole expression is one instruction.

// src1 + (sign_extend_8(src2) << lshift2) -> add ... sxtb #lshift2.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (sign_extend_16(src2) << lshift2) -> add ... sxth #lshift2.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + (sign_extend_32(src2) << lshift2) -> add ... sxtw #lshift2.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 - (sign_extend_8(src2) << lshift2) -> sub ... sxtb #lshift2.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 - (sign_extend_16(src2) << lshift2) -> sub ... sxth #lshift2.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 - (sign_extend_32(src2) << lshift2) -> sub ... sxtw #lshift2.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 + (sign_extend_8(src2) << lshift2) -> addw ... sxtb.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 + (sign_extend_16(src2) << lshift2) -> addw ... sxth.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 - (sign_extend_8(src2) << lshift2) -> subw ... sxtb.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 - (sign_extend_16(src2) << lshift2) -> subw ... sxth.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12166 
12167 
// long +/- ((long)int << lshift): fold the ConvI2L and the small left
// shift into a single add/sub with sxtw extend and shift.

instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12193 
12194 
// Mask-based zero-extension (see the *_and family above) combined with a
// small left shift (immIExt): src1 +/- ((src2 & mask) << lshift) folds
// into a single extended-register add/sub with a shifted extend.

// src1 + ((src2 & 0xFFL) << lshift) -> add ... uxtb #lshift.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + ((src2 & 0xFFFFL) << lshift) -> add ... uxth #lshift.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 + ((src2 & 0xFFFFFFFFL) << lshift) -> add ... uxtw #lshift.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 - ((src2 & 0xFFL) << lshift) -> sub ... uxtb #lshift.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 - ((src2 & 0xFFFFL) << lshift) -> sub ... uxth #lshift.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// src1 - ((src2 & 0xFFFFFFFFL) << lshift) -> sub ... uxtw #lshift.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 + ((src2 & 0xFF) << lshift) -> addw ... uxtb #lshift.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 + ((src2 & 0xFFFF) << lshift) -> addw ... uxth #lshift.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 - ((src2 & 0xFF) << lshift) -> subw ... uxtb #lshift.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// 32-bit: src1 - ((src2 & 0xFFFF) << lshift) -> subw ... uxth #lshift.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12324 // END This section of the file is automatically generated. Do not edit --------------
12325 
12326 // ============================================================================
12327 // Floating Point Arithmetic Instructions
12328 
// Scalar floating-point add/sub/mul.  Each ideal node maps 1:1 onto the
// corresponding AArch64 scalar FP instruction (single precision suffix
// 's', double precision suffix 'd').

// float + float -> fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double + double -> faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float - float -> fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double - double -> fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float * float -> fmuls.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double * double -> fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12418 
12419 // src1 * src2 + src3
12420 instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12421   predicate(UseFMA);
12422   match(Set dst (FmaF src3 (Binary src1 src2)));
12423 
12424   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12425 
12426   ins_encode %{
12427     __ fmadds(as_FloatRegister($dst$$reg),
12428              as_FloatRegister($src1$$reg),
12429              as_FloatRegister($src2$$reg),
12430              as_FloatRegister($src3$$reg));
12431   %}
12432 
12433   ins_pipe(pipe_class_default);
12434 %}
12435 
12436 // src1 * src2 + src3
12437 instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12438   predicate(UseFMA);
12439   match(Set dst (FmaD src3 (Binary src1 src2)));
12440 
12441   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12442 
12443   ins_encode %{
12444     __ fmaddd(as_FloatRegister($dst$$reg),
12445              as_FloatRegister($src1$$reg),
12446              as_FloatRegister($src2$$reg),
12447              as_FloatRegister($src3$$reg));
12448   %}
12449 
12450   ins_pipe(pipe_class_default);
12451 %}
12452 
12453 // -src1 * src2 + src3
12454 instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12455   predicate(UseFMA);
12456   match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
12457   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
12458 
12459   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12460 
12461   ins_encode %{
12462     __ fmsubs(as_FloatRegister($dst$$reg),
12463               as_FloatRegister($src1$$reg),
12464               as_FloatRegister($src2$$reg),
12465               as_FloatRegister($src3$$reg));
12466   %}
12467 
12468   ins_pipe(pipe_class_default);
12469 %}
12470 
12471 // -src1 * src2 + src3
12472 instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12473   predicate(UseFMA);
12474   match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
12475   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
12476 
12477   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12478 
12479   ins_encode %{
12480     __ fmsubd(as_FloatRegister($dst$$reg),
12481               as_FloatRegister($src1$$reg),
12482               as_FloatRegister($src2$$reg),
12483               as_FloatRegister($src3$$reg));
12484   %}
12485 
12486   ins_pipe(pipe_class_default);
12487 %}
12488 
// -src1 * src2 - src3
// Both the addend (src3) and one multiplicand are negated; the NegF on the
// multiply side may sit on either input.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
// Double-precision variant of mnaddF_reg_reg above.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12524 
// src1 * src2 - src3
// NOTE(review): the `zero` operand is not referenced by the match rule or
// the encoding — it looks like a leftover from an earlier match form;
// verify whether it can be dropped.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// Double-precision variant of mnsubF_reg_reg above.
// NOTE(review): as above, the `zero` operand appears unused — confirm.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12559 
12560 
// Math.max(FF)F
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12616 
12617 
// Float division; high ins_cost reflects the long latency of fdiv.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double division; costed higher than float division (longer latency).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12647 
// Float negation.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double negation.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Float absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12701 
// Double square root.
// Fix: this double-precision op was scheduled on the single-precision
// divide pipe (fp_div_s); use fp_div_d, consistent with every other
// double-precision pattern in this file (divD -> fp_div_d, etc.).
// Also realigned the continuation argument line.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12714 
// Float square root, matched from the d2f(sqrtd(f2d(src))) idiom that C2
// produces for Math.sqrt on a float (single-rounding is safe here).
// Fix: this single-precision op was scheduled on the double-precision
// divide pipe (fp_div_d); use fp_div_s, consistent with divF -> fp_div_s.
// Also realigned the continuation argument line.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12727 
12728 // ============================================================================
12729 // Logical Instructions
12730 
12731 // Integer Logical Instructions
12732 
12733 // And Instructions
12734 
12735 
12736 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
12737   match(Set dst (AndI src1 src2));
12738 
12739   format %{ "andw  $dst, $src1, $src2\t# int" %}
12740 
12741   ins_cost(INSN_COST);
12742   ins_encode %{
12743     __ andw(as_Register($dst$$reg),
12744             as_Register($src1$$reg),
12745             as_Register($src2$$reg));
12746   %}
12747 
12748   ins_pipe(ialu_reg_reg);
12749 %}
12750 
// Bitwise AND of int register with a logical immediate.
// Fix: the format string said "andsw" (the flag-setting variant) but the
// encoding emits the non-flag-setting andw; the format now matches the
// actual instruction, as in the sibling andI_reg_reg pattern.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12765 
// Or Instructions

instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// OR of int register with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// XOR of int register with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12829 
// Long Logical Instructions
// TODO

// Bitwise AND of two long registers.
// Fix: format annotation said "# int" for a long operation; now "# long".
// NOTE(review): the unused `cr` operand looks vestigial (no effect clause,
// not referenced by match or encode) — verify.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise AND of long register with a logical immediate.
// Fix: format annotation said "# int" for a long operation; now "# long".
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12862 
// Or Instructions

// Bitwise OR of two long registers.
// Fix: format annotation said "# int" for a long operation; now "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise OR of long register with a logical immediate.
// Fix: format annotation said "# int" for a long operation; now "# long".
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12894 
// Xor Instructions

// Bitwise XOR of two long registers.
// Fix: format annotation said "# int" for a long operation; now "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Bitwise XOR of long register with a logical immediate.
// Fix: format annotation said "# int" for a long operation; now "# long".
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12926 
// int -> long sign extension (sxtw is sbfm with lsb 0, width 32).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// unsigned int -> long: (long)src & 0xFFFFFFFF, done as a zero extension.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12965 
// int -> boolean: dst = (src != 0) ? 1 : 0. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13001 
// double -> float precision conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double precision conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, round toward zero.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// signed int -> float.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// signed long -> float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// signed int -> double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// signed long -> double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13131 
// stack <-> reg and reg <-> reg shuffles with no conversion

// Reinterpret a float stack slot as an int register (bitwise move).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret an int stack slot as a float register (bitwise move).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a double stack slot as a long register (bitwise move).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a long stack slot as a double register (bitwise move).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13205 
// Spill a float register to an int stack slot (bitwise move).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Spill an int register to a float stack slot (bitwise move).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13241 
// Spill a double register to a long stack slot (bitwise move).
// Fix: the format string had the operands reversed ("strd $dst, $src");
// the encoding stores $src into the $dst stack slot, matching the sibling
// MoveF2I_reg_stack / MoveL2D_reg_stack patterns ("str<sz> $src, $dst").
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13259 
// Spill a long register to a double stack slot (bitwise move).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13277 
// Bitwise move float reg -> int reg via fmov (no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bitwise move int reg -> float reg via fmov (no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bitwise move double reg -> long reg via fmov (no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bitwise move long reg -> double reg via fmov (no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13349 
13350 // ============================================================================
13351 // clearing of an array
13352 
13353 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13354 %{
13355   match(Set dummy (ClearArray cnt base));
13356   effect(USE_KILL cnt, USE_KILL base);
13357 
13358   ins_cost(4 * INSN_COST);
13359   format %{ "ClearArray $cnt, $base" %}
13360 
13361   ins_encode %{
13362     __ zero_words($base$$Register, $cnt$$Register);
13363   %}
13364 
13365   ins_pipe(pipe_class_memory);
13366 %}
13367 
13368 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13369 %{
13370   predicate((u_int64_t)n->in(2)->get_long()
13371             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13372   match(Set dummy (ClearArray cnt base));
13373   effect(USE_KILL base);
13374 
13375   ins_cost(4 * INSN_COST);
13376   format %{ "ClearArray $cnt, $base" %}
13377 
13378   ins_encode %{
13379     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13380   %}
13381 
13382   ins_pipe(pipe_class_memory);
13383 %}
13384 
13385 // ============================================================================
13386 // Overflow Math Instructions
13387 
13388 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13389 %{
13390   match(Set cr (OverflowAddI op1 op2));
13391 
13392   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13393   ins_cost(INSN_COST);
13394   ins_encode %{
13395     __ cmnw($op1$$Register, $op2$$Register);
13396   %}
13397 
13398   ins_pipe(icmp_reg_reg);
13399 %}
13400 
13401 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13402 %{
13403   match(Set cr (OverflowAddI op1 op2));
13404 
13405   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13406   ins_cost(INSN_COST);
13407   ins_encode %{
13408     __ cmnw($op1$$Register, $op2$$constant);
13409   %}
13410 
13411   ins_pipe(icmp_reg_imm);
13412 %}
13413 
13414 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13415 %{
13416   match(Set cr (OverflowAddL op1 op2));
13417 
13418   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13419   ins_cost(INSN_COST);
13420   ins_encode %{
13421     __ cmn($op1$$Register, $op2$$Register);
13422   %}
13423 
13424   ins_pipe(icmp_reg_reg);
13425 %}
13426 
13427 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13428 %{
13429   match(Set cr (OverflowAddL op1 op2));
13430 
13431   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13432   ins_cost(INSN_COST);
13433   ins_encode %{
13434     __ cmn($op1$$Register, $op2$$constant);
13435   %}
13436 
13437   ins_pipe(icmp_reg_imm);
13438 %}
13439 
// Overflow check for int subtract: cmp sets V on signed overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int subtract with immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract with immediate operand.
// subs with zr destination is the flags-only form (equivalent to cmp).
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13491 
// Overflow check for int negate, matched as (0 - op1).
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negate, matched as (0 - op1).
// NOTE(review): the zero operand is declared immI0 rather than immL0 even
// though the rule is OverflowSubL — confirm this is intentional.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13517 
// Overflow check for int multiply. There is no flag-setting multiply, so:
// do the multiply in 64 bits, compare against its own 32->64 sign
// extension (mismatch => overflow), then synthesize the V flag by
// computing 0x80000000 - 1 only in the overflow case.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the overflow check feeds a branch directly: skip the
// V-flag synthesis and branch on the NE/EQ result of the comparison.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13560 
// Overflow check for long multiply: compute low 64 bits (mul) and high
// 64 bits (smulh); the product fits iff the high half equals the sign
// extension of the low half. V flag synthesized as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the overflow check feeds a branch directly: skip the
// V-flag synthesis and branch on the NE/EQ result of the comparison.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13607 
13608 // ============================================================================
13609 // Compare Instructions
13610 
// Signed int compare, register-register: sets cr from op1 - op2.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13624 
// Signed int compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13638 
// Signed int compare against an immediate encodable in a single
// add/sub-class instruction (immIAddSub), so only one insn is emitted.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13652 
// Signed int compare against an arbitrary immediate; may need the
// constant materialized first, hence the doubled cost.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13666 
13667 // Unsigned compare Instructions; really, same as signed compare
13668 // except it should only be used to feed an If or a CMovI which takes a
13669 // cmpOpU.
13670 
// Unsigned int compare, register-register.  Same cmpw instruction as the
// signed form; only the flag interpretation (rFlagsRegU/cmpOpU) differs.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13684 
// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13698 
// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13712 
// Unsigned int compare against an arbitrary immediate (constant may need
// materializing, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13726 
// Signed long compare, register-register (64-bit cmp).
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13740 
// Signed long compare against constant zero.
// NOTE(review): the format says "tst" but the encoding is the
// add/sub-immediate compare (cmp $op1, #0) — the disassembly text is
// only cosmetic.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13754 
// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13768 
// Signed long compare against an arbitrary immediate (constant may need
// materializing, hence the doubled cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13782 
// Unsigned long compare, register-register.  Same cmp instruction as the
// signed form; the unsigned interpretation lives in rFlagsRegU/cmpOpU.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13796 
// Unsigned long compare against constant zero.
// NOTE(review): format "tst" is cosmetic; the encoding emits cmp $op1, #0.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13810 
// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13824 
// Unsigned long compare against an arbitrary immediate (constant may need
// materializing, hence the doubled cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13838 
// Pointer compare, register-register (pointers compare unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13852 
// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13866 
// Pointer null test: compare op1 against the null pointer constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
13880 
// Compressed-oop null test: compare op1 against narrow-oop zero.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13894 
13895 // FP comparisons
13896 //
13897 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13898 // using normal cmpOp. See declaration of rFlagsReg for details.
13899 
// Float compare, register-register: fcmps sets the flags in cr, which are
// then consumed through normal cmpOp (see the comment above on CmpF/CmpD).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13913 
// Float compare against constant 0.0 using the immediate-zero form of
// fcmp (fcmps $src1, #0.0); sets the flags in cr.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Use the standard literal 0.0: the 'D' suffix (0.0D) is a
    // non-standard vendor extension rejected by Clang and MSVC.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13927 // FROM HERE
13928 
// Double compare, register-register: fcmpd sets the flags in cr.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13942 
// Double compare against constant 0.0 using the immediate-zero form of
// fcmp (fcmpd $src1, #0.0); sets the flags in cr.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Use the standard literal 0.0: the 'D' suffix (0.0D) is a
    // non-standard vendor extension rejected by Clang and MSVC.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13956 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater.  Branch-free via conditional selects.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the previously unbalanced parentheses in the format text.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Removed an unused "Label done" / bind(done) pair: the label was
    // never branched to and emitted no code.)
  %}

  ins_pipe(pipe_class_default);

%}
13984 
// Three-way double compare (CmpD3): dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater.  Branch-free via conditional selects.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the previously unbalanced parentheses in the format text.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Removed an unused "Label done" / bind(done) pair: never referenced.)
  %}
  ins_pipe(pipe_class_default);

%}
14011 
// Three-way float compare against 0.0: dst = -1 if src1 < 0 or unordered,
// 0 if equal, +1 if greater.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the previously unbalanced parentheses in the format text.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Standard literal 0.0; the 'D' suffix (0.0D) is a non-standard
    // extension rejected by Clang and MSVC.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Removed an unused "Label done" / bind(done) pair: never referenced.)
  %}

  ins_pipe(pipe_class_default);

%}
14038 
// Three-way double compare against 0.0: dst = -1 if src1 < 0 or unordered,
// 0 if equal, +1 if greater.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the previously unbalanced parentheses in the format text.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Standard literal 0.0; the 'D' suffix (0.0D) is a non-standard
    // extension rejected by Clang and MSVC.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (Removed an unused "Label done" / bind(done) pair: never referenced.)
  %}
  ins_pipe(pipe_class_default);

%}
14064 
// CmpLTMask: dst = (p < q, signed) ? -1 : 0.
// Uses a single csinv instead of the previous csetw + subw pair:
// CSINV Wd, WZR, WZR, GE  =>  Wd = (p >= q) ? 0 : ~0 (= -1),
// which is exactly the required mask, saving one instruction.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(2 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csinvw $dst, zr, zr, ge"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = GE ? zr : ~zr  ==  (p < q) ? -1 : 0
    __ csinvw(as_Register($dst$$reg), zr, zr, Assembler::GE);
  %}

  ins_pipe(ialu_reg_reg);
%}
14085 
// CmpLTMask against zero: dst = (src < 0) ? -1 : 0.  An arithmetic shift
// right by 31 replicates the sign bit into the whole word — one insn,
// no flags needed.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14101 
14102 // ============================================================================
14103 // Max and Min
14104 
// Signed int minimum: dst = (src1 < src2) ? src1 : src2, via cmp + csel.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  // Exactly two 4-byte instructions (cmpw + cselw).
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14129 // FROM HERE
14130 
// Signed int maximum: dst = (src1 > src2) ? src1 : src2, via cmp + csel.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  // Exactly two 4-byte instructions (cmpw + cselw).
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14155 
14156 // ============================================================================
14157 // Branch Instructions
14158 
14159 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14173 
14174 // Conditional Near Branch
// Conditional branch on signed flags (b.cond).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14194 
14195 // Conditional Near Branch Unsigned
// Conditional branch on unsigned flags (b.cond, unsigned condition codes).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14215 
14216 // Make use of CBZ and CBNZ.  These instructions, as well as being
14217 // shorter than (cmp; branch), have the additional benefit of not
14218 // killing the flags.
14219 
// int ==/!= 0 branch fused into a single cbzw/cbnzw (no flags clobbered).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // cmpOpEqNe restricts the condition to EQ or NE.
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14236 
// long ==/!= 0 branch fused into a single cbz/cbnz (no flags clobbered).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // cmpOpEqNe restricts the condition to EQ or NE.
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14253 
// Pointer ==/!= null branch fused into a single cbz/cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // cmpOpEqNe restricts the condition to EQ or NE.
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14270 
// Narrow-oop ==/!= 0 branch fused into a single cbzw/cbnzw (32-bit test).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // cmpOpEqNe restricts the condition to EQ or NE.
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14287 
// (DecodeN oop) ==/!= null branch: a decoded narrow oop is null exactly
// when the narrow form is zero, so test the 32-bit register directly and
// skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14304 
// Unsigned int compare with 0 fused into cbzw/cbnzw.  The operand type
// cmpOpUEqNeLtGe admits eq/ne/lt/ge; against zero, unsigned "<= 0" (LS)
// is equivalent to "== 0", so EQ and LS both map to cbzw and the
// remaining conditions to cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14321 
// Unsigned long compare with 0 fused into cbz/cbnz.  As in the int form,
// unsigned "<= 0" (LS) against zero is the same test as "== 0".
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14338 
14339 // Test bit and Branch
14340 
14341 // Patterns for short (< 32KiB) variants
// long </>= 0 branch via a test of the sign bit (bit 63) with tbnz/tbz.
// Short (< 32KiB) variant; see the far form below for longer distances.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT (negative) => branch if sign bit set (NE for tbr); GE => bit clear.
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14357 
// int </>= 0 branch via a test of the sign bit (bit 31) with tbnz/tbz.
// Short (< 32KiB) variant.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT (negative) => branch if sign bit set (NE for tbr); GE => bit clear.
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14373 
// (op1 & single-bit-mask) ==/!= 0 long branch fused into tbz/tbnz.
// The predicate guarantees op2 is a power of two, so exact_log2 is valid.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14390 
// (op1 & single-bit-mask) ==/!= 0 int branch fused into tbz/tbnz.
// The predicate guarantees op2 is a power of two, so exact_log2 is valid.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14407 
14408 // And far variants
// Far variant of cmpL_branch_sign: same sign-bit test, but tbr is told
// the target may be out of tbz/tbnz range (/*far*/true).
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14423 
// Far variant of cmpI_branch_sign (sign-bit test, out-of-range target).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14438 
// Far variant of cmpL_branch_bit (single-bit test, out-of-range target).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14454 
// Far variant of cmpI_branch_bit (single-bit test, out-of-range target).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14470 
14471 // Test bits
14472 
// Test-bits: flags from (op1 & op2) vs 0 where op2 is encodable as a
// 64-bit logical immediate (see predicate), emitted as a single tst.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14485 
// Test-bits: flags from (op1 & op2) vs 0 where op2 is encodable as a
// 32-bit logical immediate (see predicate), emitted as a single tstw.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Format now says "tstw" to match the emitted 32-bit instruction
  // (was "tst", inconsistent with cmpI_and_reg below).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14498 
// Test-bits, register-register: flags from (op1 & op2) vs 0 (64-bit tst).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14509 
// Test-bits, register-register: flags from (op1 & op2) vs 0 (32-bit tstw).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14520 
14521 
14522 // Conditional Far Branch
14523 // Conditional Far Branch Unsigned
14524 // TODO: fixme
14525 
14526 // counted loop end branch near
// Counted-loop back-branch on signed flags (near form).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14542 
14543 // counted loop end branch near Unsigned
// Counted-loop back-branch on unsigned flags (near form).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14559 
14560 // counted loop end branch far
14561 // counted loop end branch far unsigned
14562 // TODO: fixme
14563 
14564 // ============================================================================
14565 // inlined locking and unlocking
14566 
// Inline fast-path monitor enter: result is communicated via the flags
// (cr); tmp and tmp2 are scratch registers clobbered by the lock sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14581 
// Inline fast-path monitor exit; counterpart of cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14594 
14595 
14596 // ============================================================================
14597 // Safepoint Instructions
14598 
14599 // TODO
14600 // provide a near and far version of this code
14601 
// Safepoint poll: a load from the polling page; the VM arms the page so
// the load traps when a safepoint is pending.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14614 
14615 
14616 // ============================================================================
14617 // Procedure Call/Return Instructions
14618 
14619 // Call Java Static Instruction
14620 
// Direct call to a statically bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14636 
14637 // TO HERE
14638 
14639 // Call Java Dynamic Instruction
// Call to a dynamically dispatched Java method (inline-cache call).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14655 
14656 // Call Runtime Instruction
14657 
// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14672 
// Call Runtime Leaf Instruction
14674 
// Leaf runtime call (ideal CallLeaf); shares the
// aarch64_enc_java_to_runtime encoding with CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14689 
// Call Runtime Leaf (no FP) Instruction
14691 
// Leaf runtime call for the CallLeafNoFP ideal node; identical
// encoding to CallLeafDirect, only the matched node differs.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14706 
14707 // Tail Call; Jump from runtime stub to Java code.
14708 // Also known as an 'interprocedural jump'.
14709 // Target of jump will eventually return to caller.
14710 // TailJump below removes the return address.
// Indirect tail call: branch to $jump_target with the method oop held
// in the inline-cache register (see format string); only jump_target is
// consumed by the encoding.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14723 
// Tail jump: branch to $jump_target with the exception oop pinned in
// r0 (ex_oop is iRegP_R0).  Per the comment above TailCalljmpInd, the
// TailJump form removes the return address.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14736 
14737 // Create exception oop: created by stack-crawling runtime code.
14738 // Created exception is now available to this handler, and is setup
14739 // just prior to jumping to this handler. No code emitted.
14740 // TODO check
14741 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size rule: the exception oop is already in r0 when control
// reaches the handler (set up by stack-crawling runtime code), so no
// instructions are emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14754 
14755 // Rethrow exception: The exception oop will come in the first
14756 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the rethrow stub; the exception oop arrives in
// the first argument position per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14767 
14768 
14769 // Return Instruction
14770 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already popped the frame and loaded
// the return address into lr (see comment above).
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14781 
14782 // Die now.
// Halt: emit a debug-trap instruction so execution can never fall
// through this point.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    // NOTE(review): mnemonic is spelled "dpcs1" here (architecturally
    // the instruction is DCPS1) — verify the spelling matches the
    // MacroAssembler declaration before "fixing" it.
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
14797 
14798 // ============================================================================
14799 // Partial Subtype Check
14800 //
// Search for the superklass in the secondary-supers (superklass) array
// of an instance of the subklass.  Set a hidden internal cache on a
// hit (the cache is checked with exposed code in gen_subtype_check()).
// Return NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
14805 
// Partial subtype check with a register result: zero in $result means
// hit, non-zero means miss; flags are also set (see header comment).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14820 
// Flags-only variant matched when the check result is immediately
// compared against zero; $result is clobbered rather than produced.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as partialSubtypeCheck above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14835 
// String compare, both strings UTF-16 (UU encoding).  No vector temps
// are needed for same-encoding compares, so fnoreg is passed thrice.
// NOTE(review): effect() also kills $tmp2, cnt/str registers and cr,
// though the format string only mentions $tmp1.
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14853 
// String compare, both strings Latin-1 (LL encoding); structure
// mirrors string_compareU above.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14870 
// Mixed-encoding string compare (str1 UTF-16, str2 Latin-1).  Unlike
// the same-encoding rules, three vector temps v0-v2 are used and
// therefore killed.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14890 
// Mixed-encoding string compare (str1 Latin-1, str2 UTF-16); the
// mirror image of string_compareUL above, using the same three vector
// temps v0-v2.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14910 
// String.indexOf, both strings UTF-16 and a non-constant substring
// length.  The -1 count argument signals a runtime cnt2 (cf. the
// string_indexof_con* rules below, which pass a constant icnt2).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14931 
// String.indexOf, both strings Latin-1, non-constant substring length
// (-1 sentinel); same shape as string_indexofUU above.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
14952 
// String.indexOf, mixed encoding (UL), non-constant substring length
// (-1 sentinel); same shape as string_indexofUU above.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
14973 
// String.indexOf (UU) with a small compile-time-constant substring
// length (immI_le_4): the constant is passed as icnt2 and the runtime
// cnt2/extra temps are replaced by zr, so fewer temps are needed.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
14994 
// String.indexOf (LL) with a small compile-time-constant substring
// length (immI_le_4); same shape as string_indexof_conUU above.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
15015 
// String.indexOf (UL) with a constant substring length; note the
// tighter immI_1 operand — only a single-element constant substring is
// matched for the mixed encoding.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15036 
// Single-character indexOf over a UTF-16 string: find char $ch in
// $str1[0..$cnt1) and leave the index (or miss value) in $result.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15054 
// String equality, Latin-1 (LL): the trailing literal 1 is the element
// size in bytes passed to MacroAssembler::string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15070 
// String equality, UTF-16 (UU): element size 2 bytes; otherwise
// identical to string_equalsL above.
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15086 
// byte[] equality (LL encoding): element size 1 byte passed to
// MacroAssembler::arrays_equals; $result receives the comparison
// outcome.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fixed: second operand now printed via "$ary2" (was the literal
  // text "ary2" in the disassembly format).
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15103 
// char[] equality (UU encoding): element size 2 bytes; otherwise
// identical to array_equalsB.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fixed: second operand now printed via "$ary2" (was the literal
  // text "ary2" in the disassembly format).
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15120 
// HasNegatives intrinsic: scan the byte array at $ary1 for a byte with
// the sign bit set, leaving the answer in $result.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15131 
15132 // fast char[] to byte[] compression
// char[] -> byte[] compression (StrCompressedCopy) using vector temps
// v0-v3; $result reports the outcome of char_array_compress.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15150 
15151 // fast byte[] to char[] inflation
// byte[] -> char[] inflation (StrInflatedCopy); produces no value
// (Universe dummy).  NOTE(review): format mentions only $tmp1/$tmp2
// but effect() also claims vector temp v2 and register temp r3.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15165 
15166 // encode char[] to byte[] in ISO_8859_1
// EncodeISOArray intrinsic: encode char[] $src into byte[] $dst as
// ISO-8859-1 using vector temps v0-v3; count/outcome in $result.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15184 
15185 // ============================================================================
15186 // This name is KNOWN by the ADLC and cannot be changed.
15187 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15188 // for this guy.
// ThreadLocal: zero-size, zero-cost rule — the current-thread pointer
// already lives in the dedicated thread register (thread_RegP), so no
// code is emitted.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15203 
15204 // ====================VECTOR INSTRUCTIONS=====================================
15205 
15206 // Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  // Selected only for 4-byte vector loads (ldrs into an S register).
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15216 
15217 // Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  // Selected only for 8-byte vector loads (ldrd into a D register).
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15227 
15228 // Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  // Selected only for 16-byte vector loads (ldrq into a Q register).
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15238 
15239 // Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  // Selected only for 4-byte vector stores (strs from an S register).
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15249 
15250 // Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  // Selected only for 8-byte vector stores (strd from a D register).
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15260 
15261 // Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  // Selected only for 16-byte vector stores (strq from a Q register).
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15271 
// Broadcast a GPR byte into a 64-bit vector (DUP, arrangement T8B).
// Also handles 4-element ReplicateB (padded into the D register).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15284 
// Broadcast a GPR byte into a 128-bit vector (DUP, arrangement T16B).
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15296 
// Broadcast an immediate byte (low 8 bits of $con) into a 64-bit
// vector via MacroAssembler::mov with arrangement T8B.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15309 
// Broadcast an immediate byte (low 8 bits of $con) into a 128-bit
// vector, arrangement T16B.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15321 
// Broadcast a GPR short into a 64-bit vector (DUP, arrangement T4H).
// Also covers 2-element ReplicateS.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15334 
// Broadcast a GPR short into a 128-bit vector (DUP, arrangement T8H).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15346 
// Broadcast an immediate short (low 16 bits of $con) into a 64-bit
// vector, arrangement T4H; also covers 2-element ReplicateS.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15359 
// Broadcast an immediate short (low 16 bits of $con) into a 128-bit
// vector, arrangement T8H.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15371 
// Broadcast a GPR int into a 64-bit vector (DUP, arrangement T2S).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15383 
// Broadcast a GPR int into a 128-bit vector (DUP, arrangement T4S).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15395 
// Broadcast an immediate int into a 64-bit vector, arrangement T2S.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15407 
// Broadcast an immediate int into a 128-bit vector, arrangement T4S.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15419 
// Broadcast a GPR long into a 128-bit vector (DUP, arrangement T2D).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15431 
// Zero a 128-bit vector by EOR-ing it with itself.
// NOTE(review): despite the "2L" name this matches ReplicateI of an
// immI0 — presumably ReplicateL-of-zero is idealized to ReplicateI;
// confirm against the ideal-graph transforms before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15445 
// Broadcast a float register into a 64-bit vector (DUP, T2S).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15458 
// Broadcast a float register into a 128-bit vector (DUP, T4S).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15471 
// Replicate a scalar double into both 64-bit lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15484 
15485 // ====================REDUCTION ARITHMETIC====================================
15486 
// Integer add-reduction over a 2-lane vector: dst = src1 + src2[0] + src2[1].
// Each lane is moved to a GPR with UMOV and accumulated with 32-bit adds.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15505 
// Integer add-reduction over a 4-lane vector: ADDV sums all lanes into
// lane 0 of tmp, which is then added to the scalar src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15523 
// Integer mul-reduction over a 2-lane vector: dst = src1 * src2[0] * src2[1].
// Lanes are extracted to a GPR with UMOV and multiplied in sequence; dst is
// a TEMP as well because it is written before the last lane is consumed.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the format string previously ended with a stray "\n\t",
  // producing a dangling blank continuation line in debug output.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15542 
// Integer mul-reduction over a 4-lane vector. The upper 64 bits of src2 are
// copied over the lower 64 bits of tmp (INS D), a 2S vector multiply folds
// lanes pairwise, then the two remaining partial products are extracted and
// multiplied with the scalar src1.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Fixed: format now shows the D lane spec on the INS, says "mulv ... T2S"
  // to match the emitted vector multiply, and no longer ends with a stray
  // "\n\t" continuation.
  format %{ "ins   $tmp, D, $src2, 0, 1\n\t"
            "mulv  $tmp, T2S, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15567 
// Float add-reduction over a 2-lane vector: dst = src1 + src2[0] + src2[1].
// Lane 1 is shifted down into tmp with INS S so a scalar fadds can consume it.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15587 
// Float add-reduction over a 4-lane vector: lanes 1..3 are each moved into
// lane 0 of tmp (INS S) and added to the running scalar sum. Strictly
// sequential scalar adds preserve strict FP ordering of the reduction.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15619 
// Float mul-reduction over a 2-lane vector: dst = src1 * src2[0] * src2[1].
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the format trailer previously said "add reduction4f" — wrong
  // operation and wrong lane count for this MUL reduction over 2 floats.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15639 
// Float mul-reduction over a 4-lane vector: each of lanes 1..3 is moved
// into lane 0 of tmp (INS S) and multiplied into the running scalar product.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the format trailer previously said "add reduction4f" — this is a
  // MUL reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15671 
// Double add-reduction over a 2-lane vector: dst = src1 + src2[0] + src2[1].
// Lane 1 is moved down with INS D so a scalar faddd can consume it.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15691 
// Double mul-reduction over a 2-lane vector: dst = src1 * src2[0] * src2[1].
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the format trailer previously said "add reduction2d" — this is a
  // MUL reduction.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15711 
15712 // ====================VECTOR ARITHMETIC=======================================
15713 
15714 // --------------------------------- ADD --------------------------------------
15715 
// AddVB on a 64-bit vector: byte add, 8B arrangement (also serves 4-byte vectors).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15730 
// AddVB on a 128-bit vector: byte add, 16B arrangement.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15744 
// AddVS on a 64-bit vector: short add, 4H arrangement (also serves 2-short vectors).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15759 
// AddVS on a 128-bit vector: short add, 8H arrangement.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15773 
// AddVI on a 64-bit vector: int add, 2S arrangement.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15787 
// AddVI on a 128-bit vector: int add, 4S arrangement.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15801 
// AddVL on a 128-bit vector: long add, 2D arrangement.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15815 
// AddVF on a 64-bit vector: float add, 2S arrangement.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15829 
// AddVF on a 128-bit vector: float add, 4S arrangement.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15843 
// AddVD on a 128-bit vector: double add, 2D arrangement.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Predicate added for consistency with the sibling 2D rules (vsub2D,
  // vmul2D, vdiv2D); a vecX vector of doubles always has exactly 2 lanes,
  // so this does not change which nodes can match.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15856 
15857 // --------------------------------- SUB --------------------------------------
15858 
// SubVB on a 64-bit vector: byte subtract, 8B arrangement (also serves 4-byte vectors).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15873 
// SubVB on a 128-bit vector: byte subtract, 16B arrangement.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15887 
// SubVS on a 64-bit vector: short subtract, 4H arrangement (also serves 2-short vectors).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15902 
// SubVS on a 128-bit vector: short subtract, 8H arrangement.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15916 
// SubVI on a 64-bit vector: int subtract, 2S arrangement.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15930 
// SubVI on a 128-bit vector: int subtract, 4S arrangement.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15944 
// SubVL on a 128-bit vector: long subtract, 2D arrangement.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15958 
// SubVF on a 64-bit vector: float subtract, 2S arrangement.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15972 
// SubVF on a 128-bit vector: float subtract, 4S arrangement.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15986 
// SubVD on a 128-bit vector: double subtract, 2D arrangement.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16000 
16001 // --------------------------------- MUL --------------------------------------
16002 
// MulVS on a 64-bit vector: short multiply, 4H arrangement (also serves 2-short vectors).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16017 
// MulVS on a 128-bit vector: short multiply, 8H arrangement.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16031 
// MulVI on a 64-bit vector: int multiply, 2S arrangement.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16045 
// MulVI on a 128-bit vector: int multiply, 4S arrangement.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16059 
// MulVF on a 64-bit vector: float multiply, 2S arrangement.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16073 
// MulVF on a 128-bit vector: float multiply, 4S arrangement.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16087 
// MulVD on a 128-bit vector: double multiply, 2D arrangement.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16101 
16102 // --------------------------------- MLA --------------------------------------
16103 
// Fused short multiply-accumulate: dst += src1 * src2, 4H arrangement
// (matches AddVS of dst with a MulVS; also serves 2-short vectors).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16118 
// Fused short multiply-accumulate: dst += src1 * src2, 8H arrangement.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16132 
// Fused int multiply-accumulate: dst += src1 * src2, 2S arrangement.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16146 
// Fused int multiply-accumulate: dst += src1 * src2, 4S arrangement.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16160 
16161 // dst + src1 * src2
// Fused float multiply-add (FmaVF): dst + src1 * src2, 2S arrangement.
// Guarded by UseFMA since FMLA rounds once, unlike separate mul+add.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16174 
16175 // dst + src1 * src2
// Fused float multiply-add (FmaVF): dst + src1 * src2, 4S arrangement.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16188 
16189 // dst + src1 * src2
// Fused double multiply-add (FmaVD): dst + src1 * src2, 2D arrangement.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16202 
16203 // --------------------------------- MLS --------------------------------------
16204 
// Fused short multiply-subtract: dst -= src1 * src2, 4H arrangement
// (matches SubVS of dst with a MulVS; also serves 2-short vectors).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16219 
// Fused short multiply-subtract: dst -= src1 * src2, 8H arrangement.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16233 
// Fused int multiply-subtract: dst -= src1 * src2, 2S arrangement.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16247 
// Fused int multiply-subtract: dst -= src1 * src2, 4S arrangement.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16261 
16262 // dst - src1 * src2
// Fused float multiply-subtract (FmaVF with a negated factor):
// dst - src1 * src2, 2S arrangement. Both negation placements match.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16276 
16277 // dst - src1 * src2
// Fused float multiply-subtract (FmaVF with a negated factor):
// dst - src1 * src2, 4S arrangement.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16291 
16292 // dst - src1 * src2
// Fused double multiply-subtract (FmaVD with a negated factor):
// dst - src1 * src2, 2D arrangement.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16306 
16307 // --------------------------------- DIV --------------------------------------
16308 
// DivVF on a 64-bit vector: float divide, 2S arrangement.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16322 
// DivVF on a 128-bit vector: float divide, 4S arrangement.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16336 
16337 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16338 %{
16339   predicate(n->as_Vector()->length() == 2);
16340   match(Set dst (DivVD src1 src2));
16341   ins_cost(INSN_COST);
16342   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16343   ins_encode %{
16344     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16345             as_FloatRegister($src1$$reg),
16346             as_FloatRegister($src2$$reg));
16347   %}
16348   ins_pipe(vmuldiv_fp128);
16349 %}
16350 
16351 // --------------------------------- SQRT -------------------------------------
16352 
// Vector square root, 2 doubles (2D, 128-bit vector): dst = sqrt(src) per lane.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16364 
16365 // --------------------------------- ABS --------------------------------------
16366 
// Vector FP absolute value, 2 floats (2S, 64-bit vector).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector FP absolute value, 4 floats (4S, 128-bit vector).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Vector FP absolute value, 2 doubles (2D, 128-bit vector).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16405 
16406 // --------------------------------- NEG --------------------------------------
16407 
// Vector FP negate, 2 floats (2S, 64-bit vector).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Vector FP negate, 4 floats (4S, 128-bit vector).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Vector FP negate, 2 doubles (2D, 128-bit vector).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16446 
16447 // --------------------------------- AND --------------------------------------
16448 
// Vector bitwise AND in a 64-bit (D) register. The predicate's 4-byte arm
// also routes 4-byte vectors here, since they are held in a D register too.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Vector bitwise AND in a 128-bit (Q) register.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16477 
16478 // --------------------------------- OR ---------------------------------------
16479 
// Vector bitwise OR in a 64-bit (D) register: dst = src1 | src2, emitted as
// the SIMD ORR instruction. The predicate's 4-byte arm also routes 4-byte
// vectors here, since they are held in a D register too.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // The format previously printed "and" — a copy-paste typo from vand8B;
  // this rule emits orr, so the disassembly text now matches (cf. vor16B).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16494 
// Vector bitwise OR in a 128-bit (Q) register.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16508 
16509 // --------------------------------- XOR --------------------------------------
16510 
// Vector bitwise XOR in a 64-bit (D) register. The predicate's 4-byte arm
// also routes 4-byte vectors here. NOTE(review): the format prints "xor"
// (the IR operation) while the emitted AArch64 mnemonic is eor.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Vector bitwise XOR in a 128-bit (Q) register (emitted as eor).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16539 
16540 // ------------------------------ Shift ---------------------------------------
// Materialize a vector shift count: replicate the GP-register count into
// every byte lane of a 64-bit vector. Shared by left- and right-shift
// count nodes (variable right shifts negate the count at the use site).
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Same as vshiftcnt8B, but replicating into a 128-bit vector.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16562 
// Vector left shift by per-lane variable count, bytes in a 64-bit vector
// (also matches 4-byte vectors, per the length == 4 predicate arm).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector left shift by per-lane variable count, 16 bytes.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16589 
16590 // Right shifts with vector shift count on aarch64 SIMD are implemented
16591 // as left shift by negative shift count.
16592 // There are two cases for vector shift count.
16593 //
16594 // Case 1: The vector shift count is from replication.
16595 //        |            |
16596 //    LoadVector  RShiftCntV
16597 //        |       /
16598 //     RShiftVI
// Note: In the inner loop, multiple neg instructions are used; they can be
// moved to the outer loop and merged into one neg instruction.
16601 //
16602 // Case 2: The vector shift count is from loading.
// This case isn't supported by the middle-end yet, but it is supported by
// panama/vectorIntrinsics (JEP 338: Vector API).
16605 //        |            |
16606 //    LoadVector  LoadVector
16607 //        |       /
16608 //     RShiftVI
16609 //
16610 
// Vector arithmetic right shift by variable count, bytes (8B).
// AArch64 sshl shifts right when a lane's count is negative, so the count
// vector is negated into tmp first (byte-wise negr).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector arithmetic right shift by variable count, bytes (16B).
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16645 
// Vector logical (unsigned) right shift by variable count, bytes (8B):
// negate the count, then ushl (which shifts right for negative counts).
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector logical right shift by variable count, bytes (16B).
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16680 
// Vector left shift by immediate, bytes (8B). A count >= 8 (element width)
// zeroes every lane, implemented as eor dst,src,src; shl cannot encode it.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // shift >= element width: result is all zeroes
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Vector left shift by immediate, bytes (16B); same zeroing rule as above.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16719 
// Vector arithmetic right shift by immediate, bytes (8B). Counts >= 8 are
// clamped to 7: shifting in sign bits by >= element width yields the same
// result as shifting by width-1, and 7 is the largest count sshr encodes
// with this semantics.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Vector arithmetic right shift by immediate, bytes (16B); same clamping.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16748 
// Vector logical right shift by immediate, bytes (8B). A count >= 8 zeroes
// every lane (eor dst,src,src), since ushr cannot encode it.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Vector logical right shift by immediate, bytes (16B); same zeroing rule.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16787 
// Vector left shift by variable count, shorts (4H; also 2-element vectors
// per the length == 2 predicate arm).
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector left shift by variable count, shorts (8H).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16814 
// Vector arithmetic right shift by variable count, shorts (4H): negate the
// count vector then sshl. The negr uses a byte arrangement (T8B); per-lane
// counts occupy the low byte of each H lane, which sshl consumes.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector arithmetic right shift by variable count, shorts (8H).
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16849 
// Vector logical right shift by variable count, shorts (4H): negate the
// count, then ushl (which shifts right for negative per-lane counts).
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector logical right shift by variable count, shorts (8H).
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16884 
// Vector left shift by immediate, shorts (4H). A count >= 16 (element
// width) zeroes every lane via eor dst,src,src.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Vector left shift by immediate, shorts (8H); same zeroing rule.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16923 
// Vector arithmetic right shift by immediate, shorts (4H). Counts >= 16
// are clamped to 15 — equivalent for sign-propagating shifts.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Vector arithmetic right shift by immediate, shorts (8H); same clamping.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16952 
// Vector logical right shift by immediate, shorts (4H). A count >= 16
// zeroes every lane via eor dst,src,src.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Vector logical right shift by immediate, shorts (8H); same zeroing rule.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16991 
// Vector left shift by variable count, 2 ints (2S).
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector left shift by variable count, 4 ints (4S).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17017 
// Vector arithmetic right shift by variable count, 2 ints (2S): negate the
// count into tmp, then sshl (shifts right for negative per-lane counts).
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector arithmetic right shift by variable count, 4 ints (4S).
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17051 
// Vector logical right shift by variable count, 2 ints (2S): negate the
// count, then ushl.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// Vector logical right shift by variable count, 4 ints (4S).
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17085 
// Vector left shift by immediate, 2 ints (2S).
// NOTE(review): unlike the B/H variants there is no out-of-range handling
// here — presumably the constant is already masked to 0..31 for int shifts
// upstream; verify against the middle-end.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Vector left shift by immediate, 4 ints (4S).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17111 
// Vector arithmetic right shift by immediate, 2 ints (2S).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Vector arithmetic right shift by immediate, 4 ints (4S).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17137 
// Vector logical right shift by immediate, 2 ints (2S).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}

// Vector logical right shift by immediate, 4 ints (4S).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17163 
// Vector left shift by variable count, 2 longs (2D).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Vector arithmetic right shift by variable count, 2 longs (2D): negate the
// count into tmp, then sshl (shifts right for negative per-lane counts).
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Vector logical right shift by variable count, 2 longs (2D).
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17210 
// Left shift of a 128-bit vector of two longs by an immediate count.
// Matches the LShiftVL ideal node for length-2 vectors.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // SHL, arrangement T2D: two 64-bit lanes, immediate shift amount.
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17223 
// Arithmetic (signed) right shift of a 128-bit vector of two longs by an
// immediate count. Matches the RShiftVL ideal node for length-2 vectors.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // SSHR, arrangement T2D: signed right shift, immediate count.
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17236 
// Logical (unsigned) right shift of a 128-bit vector of two longs by an
// immediate count. Matches the URShiftVL ideal node for length-2 vectors.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // USHR, arrangement T2D: unsigned right shift, immediate count.
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17249 
17250 //----------PEEPHOLE RULES-----------------------------------------------------
17251 // These must follow all instruction definitions as they use the names
17252 // defined in the instructions definitions.
17253 //
17254 // peepmatch ( root_instr_name [preceding_instruction]* );
17255 //
17256 // peepconstraint %{
17257 // (instruction_number.operand_name relational_op instruction_number.operand_name
17258 //  [, ...] );
17259 // // instruction numbers are zero-based using left to right order in peepmatch
17260 //
17261 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17262 // // provide an instruction_number.operand_name for each operand that appears
17263 // // in the replacement instruction's match rule
17264 //
17265 // ---------VM FLAGS---------------------------------------------------------
17266 //
17267 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17268 //
17269 // Each peephole rule is given an identifying number starting with zero and
17270 // increasing by one in the order seen by the parser.  An individual peephole
17271 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17272 // on the command-line.
17273 //
17274 // ---------CURRENT LIMITATIONS----------------------------------------------
17275 //
17276 // Only match adjacent instructions in same basic block
17277 // Only equality constraints
17278 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17279 // Only one replacement instruction
17280 //
17281 // ---------EXAMPLE----------------------------------------------------------
17282 //
17283 // // pertinent parts of existing instructions in architecture description
17284 // instruct movI(iRegINoSp dst, iRegI src)
17285 // %{
17286 //   match(Set dst (CopyI src));
17287 // %}
17288 //
17289 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17290 // %{
17291 //   match(Set dst (AddI dst src));
17292 //   effect(KILL cr);
17293 // %}
17294 //
17295 // // Change (inc mov) to lea
17296 // peephole %{
//   // increment preceded by register-register move
17298 //   peepmatch ( incI_iReg movI );
17299 //   // require that the destination register of the increment
17300 //   // match the destination register of the move
17301 //   peepconstraint ( 0.dst == 1.dst );
17302 //   // construct a replacement instruction that sets
17303 //   // the destination to ( move's source register + one )
17304 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17305 // %}
17306 //
17307 
17308 // Implementation no longer uses movX instructions since
17309 // machine-independent system no longer uses CopyX nodes.
17310 //
17311 // peephole
17312 // %{
17313 //   peepmatch (incI_iReg movI);
17314 //   peepconstraint (0.dst == 1.dst);
17315 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17316 // %}
17317 
17318 // peephole
17319 // %{
17320 //   peepmatch (decI_iReg movI);
17321 //   peepconstraint (0.dst == 1.dst);
17322 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17323 // %}
17324 
17325 // peephole
17326 // %{
17327 //   peepmatch (addI_iReg_imm movI);
17328 //   peepconstraint (0.dst == 1.dst);
17329 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17330 // %}
17331 
17332 // peephole
17333 // %{
17334 //   peepmatch (incL_iReg movL);
17335 //   peepconstraint (0.dst == 1.dst);
17336 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17337 // %}
17338 
17339 // peephole
17340 // %{
17341 //   peepmatch (decL_iReg movL);
17342 //   peepconstraint (0.dst == 1.dst);
17343 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17344 // %}
17345 
17346 // peephole
17347 // %{
17348 //   peepmatch (addL_iReg_imm movL);
17349 //   peepconstraint (0.dst == 1.dst);
17350 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17351 // %}
17352 
17353 // peephole
17354 // %{
17355 //   peepmatch (addP_iReg_imm movP);
17356 //   peepconstraint (0.dst == 1.dst);
17357 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17358 // %}
17359 
17360 // // Change load of spilled value to only a spill
17361 // instruct storeI(memory mem, iRegI src)
17362 // %{
17363 //   match(Set mem (StoreI mem src));
17364 // %}
17365 //
17366 // instruct loadI(iRegINoSp dst, memory mem)
17367 // %{
17368 //   match(Set dst (LoadI mem));
17369 // %}
17370 //
17371 
17372 //----------SMARTSPILL RULES---------------------------------------------------
17373 // These must follow all instruction definitions as they use the names
17374 // defined in the instructions definitions.
17375 
17376 // Local Variables:
17377 // mode: c++
17378 // End: