1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Integer registers r0-r30 and sp. Each 64-bit register is modelled as
// a real lower 32-bit half (Rn) plus a virtual upper half (Rn_H), as
// described in the header comment above.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately not defined here: they are kept invisible
// to the register allocator for use as scratch registers (see the
// header comment above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are save-on-entry (SOE) under the C calling convention but
// SOC for Java use (see header comment on avoiding callee saves)
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 have fixed roles (see trailing comments) and are never
// allocated to Java values
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (whereas
// the platform ABI treats v8-v15 as callee save). Float registers
// v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register contributes four 32-bit slots to the
  // allocator: Vn (first word), then Vn_H, Vn_J and Vn_K via
  // ->next(1..3), covering the full 128-bit register.

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8-v15 are callee save under the platform ABI but are defined SOC
  // here for Java use (see comment above)
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the general registers: volatiles first, then
// the Java argument registers, then the C callee-saved registers, and
// finally the fixed-role registers which are never allocated.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the FP/SIMD registers: the no-save registers
// v16-v31 first, then the argument registers v0-v7, then v8-v15 which
// the platform ABI treats as callee save.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
    // R31 (sp) is deliberately omitted; see the class comment above
);
 471 
// Singleton classes pin an int value into one specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H                  // sp: unlike any_reg32, this class includes it
);
 517 
 518 // Class for all non-special integer registers
// Variant used when R29 must stay reserved as the frame pointer.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
    // fixed-role registers below are excluded from allocation
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// Variant that additionally makes R29 (fp) available for allocation.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
// Long variant used when R29 must stay reserved as the frame pointer.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    // fixed-role registers below are excluded from allocation
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// Long variant that additionally makes R29 (fp) available for allocation.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit classes: each pins a long/pointer value to one
// specific register (base slot plus its virtual upper half).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
 726 // Class for all pointer registers
// Includes the fixed-role registers r27-r31, unlike no_special_ptr_reg below.
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
 760 // Class for all non_special pointer registers
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    // fixed-role registers below are excluded from allocation
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
 794 // Class for all float registers
// Only the first 32-bit slot of each V register is used for a float.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
// Each double occupies the base slot plus its _H half (64 bits).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
 868 // Class for all 64bit vector registers
// 64-bit vectors use the base slot plus _H, like double_reg above.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// a 128-bit vector uses all four slots (V<n>, V<n>_H, V<n>_J, V<n>_K)
// of its SIMD/FP register
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Singleton register classes: each names one specific SIMD/FP register
// (both 64-bit halves) so an instruct rule can pin an operand to it.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls are twice the cost of a plain instruction
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile accesses are priced much higher to discourage matching
  // them when a cheaper non-volatile form would do
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "opto/addnode.hpp"
1003 
// Hooks expected by the shared C2 output code; AArch64 does not use
// call trampoline stubs, so both queries report zero size/relocs.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1021 
// Emitters and size queries for the exception and deopt handler stubs
// planted at the end of each compiled method.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // the exception handler is a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};
1038 
 // predicate identifying CAS-style LoadStore opcodes; defined in the
 // source block below
 bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1060 %}
1061 
1062 source %{
1063 
1064   // Optimizaton of volatile gets and puts
1065   // -------------------------------------
1066   //
1067   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1068   // use to implement volatile reads and writes. For a volatile read
1069   // we simply need
1070   //
1071   //   ldar<x>
1072   //
1073   // and for a volatile write we need
1074   //
1075   //   stlr<x>
1076   //
1077   // Alternatively, we can implement them by pairing a normal
1078   // load/store with a memory barrier. For a volatile read we need
1079   //
1080   //   ldr<x>
1081   //   dmb ishld
1082   //
1083   // for a volatile write
1084   //
1085   //   dmb ish
1086   //   str<x>
1087   //   dmb ish
1088   //
1089   // We can also use ldaxr and stlxr to implement compare and swap CAS
1090   // sequences. These are normally translated to an instruction
1091   // sequence like the following
1092   //
1093   //   dmb      ish
1094   // retry:
1095   //   ldxr<x>   rval raddr
1096   //   cmp       rval rold
1097   //   b.ne done
1098   //   stlxr<x>  rval, rnew, rold
1099   //   cbnz      rval retry
1100   // done:
1101   //   cset      r0, eq
1102   //   dmb ishld
1103   //
1104   // Note that the exclusive store is already using an stlxr
1105   // instruction. That is required to ensure visibility to other
1106   // threads of the exclusive write (assuming it succeeds) before that
1107   // of any subsequent writes.
1108   //
1109   // The following instruction sequence is an improvement on the above
1110   //
1111   // retry:
1112   //   ldaxr<x>  rval raddr
1113   //   cmp       rval rold
1114   //   b.ne done
1115   //   stlxr<x>  rval, rnew, rold
1116   //   cbnz      rval retry
1117   // done:
1118   //   cset      r0, eq
1119   //
1120   // We don't need the leading dmb ish since the stlxr guarantees
1121   // visibility of prior writes in the case that the swap is
1122   // successful. Crucially we don't have to worry about the case where
1123   // the swap is not successful since no valid program should be
1124   // relying on visibility of prior changes by the attempting thread
1125   // in the case where the CAS fails.
1126   //
1127   // Similarly, we don't need the trailing dmb ishld if we substitute
1128   // an ldaxr instruction since that will provide all the guarantees we
1129   // require regarding observation of changes made by other threads
1130   // before any change to the CAS address observed by the load.
1131   //
1132   // In order to generate the desired instruction sequence we need to
1133   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1135   // writes or CAS operations and ii) do not occur through any other
1136   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1138   // sequences to the desired machine code sequences. Selection of the
1139   // alternative rules can be implemented by predicates which identify
1140   // the relevant node sequences.
1141   //
1142   // The ideal graph generator translates a volatile read to the node
1143   // sequence
1144   //
1145   //   LoadX[mo_acquire]
1146   //   MemBarAcquire
1147   //
1148   // As a special case when using the compressed oops optimization we
1149   // may also see this variant
1150   //
1151   //   LoadN[mo_acquire]
1152   //   DecodeN
1153   //   MemBarAcquire
1154   //
1155   // A volatile write is translated to the node sequence
1156   //
1157   //   MemBarRelease
1158   //   StoreX[mo_release] {CardMark}-optional
1159   //   MemBarVolatile
1160   //
1161   // n.b. the above node patterns are generated with a strict
1162   // 'signature' configuration of input and output dependencies (see
1163   // the predicates below for exact details). The card mark may be as
1164   // simple as a few extra nodes or, in a few GC configurations, may
1165   // include more complex control flow between the leading and
1166   // trailing memory barriers. However, whatever the card mark
1167   // configuration these signatures are unique to translated volatile
1168   // reads/stores -- they will not appear as a result of any other
1169   // bytecode translation or inlining nor as a consequence of
1170   // optimizing transforms.
1171   //
1172   // We also want to catch inlined unsafe volatile gets and puts and
1173   // be able to implement them using either ldar<x>/stlr<x> or some
1174   // combination of ldr<x>/stlr<x> and dmb instructions.
1175   //
1176   // Inlined unsafe volatiles puts manifest as a minor variant of the
1177   // normal volatile put node sequence containing an extra cpuorder
1178   // membar
1179   //
1180   //   MemBarRelease
1181   //   MemBarCPUOrder
1182   //   StoreX[mo_release] {CardMark}-optional
1183   //   MemBarCPUOrder
1184   //   MemBarVolatile
1185   //
1186   // n.b. as an aside, a cpuorder membar is not itself subject to
1187   // matching and translation by adlc rules.  However, the rule
1188   // predicates need to detect its presence in order to correctly
1189   // select the desired adlc rules.
1190   //
1191   // Inlined unsafe volatile gets manifest as a slightly different
1192   // node sequence to a normal volatile get because of the
1193   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1196   // present
1197   //
1198   //   MemBarCPUOrder
1199   //        ||       \\
1200   //   MemBarCPUOrder LoadX[mo_acquire]
1201   //        ||            |
1202   //        ||       {DecodeN} optional
1203   //        ||       /
1204   //     MemBarAcquire
1205   //
1206   // In this case the acquire membar does not directly depend on the
1207   // load. However, we can be sure that the load is generated from an
1208   // inlined unsafe volatile get if we see it dependent on this unique
1209   // sequence of membar nodes. Similarly, given an acquire membar we
1210   // can know that it was added because of an inlined unsafe volatile
1211   // get if it is fed and feeds a cpuorder membar and if its feed
1212   // membar also feeds an acquiring load.
1213   //
1214   // Finally an inlined (Unsafe) CAS operation is translated to the
1215   // following ideal graph
1216   //
1217   //   MemBarRelease
1218   //   MemBarCPUOrder
1219   //   CompareAndSwapX {CardMark}-optional
1220   //   MemBarCPUOrder
1221   //   MemBarAcquire
1222   //
1223   // So, where we can identify these volatile read and write
1224   // signatures we can choose to plant either of the above two code
1225   // sequences. For a volatile read we can simply plant a normal
1226   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1227   // also choose to inhibit translation of the MemBarAcquire and
1228   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1229   //
1230   // When we recognise a volatile store signature we can choose to
1231   // plant at a dmb ish as a translation for the MemBarRelease, a
1232   // normal str<x> and then a dmb ish for the MemBarVolatile.
1233   // Alternatively, we can inhibit translation of the MemBarRelease
1234   // and MemBarVolatile and instead plant a simple stlr<x>
1235   // instruction.
1236   //
1237   // when we recognise a CAS signature we can choose to plant a dmb
1238   // ish as a translation for the MemBarRelease, the conventional
1239   // macro-instruction sequence for the CompareAndSwap node (which
1240   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1241   // Alternatively, we can elide generation of the dmb instructions
1242   // and plant the alternative CompareAndSwap macro-instruction
1243   // sequence (which uses ldaxr<x>).
1244   //
1245   // Of course, the above only applies when we see these signature
1246   // configurations. We still want to plant dmb instructions in any
1247   // other cases where we may see a MemBarAcquire, MemBarRelease or
1248   // MemBarVolatile. For example, at the end of a constructor which
1249   // writes final/volatile fields we will see a MemBarRelease
1250   // instruction and this needs a 'dmb ish' lest we risk the
1251   // constructed object being visible without making the
1252   // final/volatile field writes visible.
1253   //
1254   // n.b. the translation rules below which rely on detection of the
1255   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1256   // If we see anything other than the signature configurations we
1257   // always just translate the loads and stores to ldr<x> and str<x>
1258   // and translate acquire, release and volatile membars to the
1259   // relevant dmb instructions.
1260   //
1261 
  // is_CAS(int opcode, bool maybe_volatile)
  //
  // return true if opcode is one of the possible CompareAndSwapX
  // values otherwise false. the first group of opcodes below always
  // counts as a CAS; the second group (CompareAndExchangeX and weak
  // CAS variants) only counts when the caller passes
  // maybe_volatile == true.

  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // these only count as a CAS in a volatile context (the result
      // depends on the maybe_volatile argument)
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
1307 
1308   // helper to determine the maximum number of Phi nodes we may need to
1309   // traverse when searching from a card mark membar for the merge mem
1310   // feeding a trailing membar or vice versa
1311 
1312 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1313 
// Return true when the given acquire membar can be elided because the
// preceding load / CAS will itself be emitted with acquire semantics
// (ldar<x> / ldaxr<x>).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode* mb = barrier->as_MemBar();

  // a membar trailing a volatile load: the load itself provides the
  // acquire so the dmb is redundant
  if (mb->trailing_load()) {
    return true;
  }

  // a membar trailing a load-store: only elide when the load-store is
  // a CAS-style op (its exclusive load supplies the acquire)
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
1337 
1338 bool needs_acquiring_load(const Node *n)
1339 {
1340   assert(n->is_Load(), "expecting a load");
1341   if (UseBarriersForVolatile) {
1342     // we use a normal load and a dmb
1343     return false;
1344   }
1345 
1346   LoadNode *ld = n->as_Load();
1347 
1348   return ld->is_acquire();
1349 }
1350 
1351 bool unnecessary_release(const Node *n)
1352 {
1353   assert((n->is_MemBar() &&
1354           n->Opcode() == Op_MemBarRelease),
1355          "expecting a release membar");
1356 
1357   if (UseBarriersForVolatile) {
1358     // we need to plant a dmb
1359     return false;
1360   }
1361 
1362   MemBarNode *barrier = n->as_MemBar();
1363   if (!barrier->leading()) {
1364     return false;
1365   } else {
1366     Node* trailing = barrier->trailing_membar();
1367     MemBarNode* trailing_mb = trailing->as_MemBar();
1368     assert(trailing_mb->trailing(), "Not a trailing membar?");
1369     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1370 
1371     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1372     if (mem->is_Store()) {
1373       assert(mem->as_Store()->is_release(), "");
1374       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1375       return true;
1376     } else {
1377       assert(mem->is_LoadStore(), "");
1378       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1379       return is_CAS(mem->Opcode(), true);
1380     }
1381   }
1382   return false;
1383 }
1384 
// Return true when the given trailing MemBarVolatile can be elided
// because the preceding store will be emitted as a releasing stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // elide only when this membar trails a releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // debug-only cross-check that the leading membar agrees with this
  // trailing one
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
1408 
1409 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1410 
1411 bool needs_releasing_store(const Node *n)
1412 {
1413   // assert n->is_Store();
1414   if (UseBarriersForVolatile) {
1415     // we use a normal store and dmb combination
1416     return false;
1417   }
1418 
1419   StoreNode *st = n->as_Store();
1420 
1421   return st->trailing_membar() != NULL;
1422 }
1423 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmb instructions are planted around the CAS instead
    return false;
  }

  LoadStoreNode* ldst = n->as_LoadStore();
  if (is_CAS(n->Opcode(), false)) {
    // an unconditional CAS opcode must always carry a trailing membar
    assert(ldst->trailing_membar() != NULL, "expected trailing membar");
  } else {
    // CompareAndExchange / weak CAS variants use the acquiring form
    // only when pinned by a trailing membar
    return ldst->trailing_membar() != NULL;
  }

  // so we can just return true here
  return true;
}
1445 
1446 // predicate controlling translation of StoreCM
1447 //
1448 // returns true if a StoreStore must precede the card write otherwise
1449 // false
1450 
1451 bool unnecessary_storestore(const Node *storecm)
1452 {
1453   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
1454 
1455   // we need to generate a dmb ishst between an object put and the
1456   // associated card mark when we are using CMS without conditional
1457   // card marking
1458 
1459   if (UseConcMarkSweepGC && !UseCondCardMark) {
1460     return false;
1461   }
1462 
1463   // a storestore is unnecesary in all other cases
1464 
1465   return true;
1466 }
1467 
1468 
1469 #define __ _masm.
1470 
1471 // advance declarations for helper functions to convert register
1472 // indices to register objects
1473 
1474 // the ad file has to provide implementations of certain methods
1475 // expected by the generic code
1476 //
1477 // REQUIRED FUNCTIONALITY
1478 
1479 //=============================================================================
1480 
1481 // !!!!! Special hack to get all types of calls to specify the byte offset
1482 //       from the start of the call to the point where the return address
1483 //       will point.
1484 
1485 int MachCallStaticJavaNode::ret_addr_offset()
1486 {
1487   // call should be a simple bl
1488   int off = 4;
1489   return off;
1490 }
1491 
// Byte offset from the start of a dynamic Java call to the point
// where the return address will land: four 4-byte instructions.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
1496 
// Byte offset from the start of a runtime call to the point where the
// return address will land; depends on whether the target lives in
// the code cache (far_call) or is a real runtime callout.
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs the call will be
  //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blrt rscratch1
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    // in-code-cache target: a far call sequence
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}
1513 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1527 
1528 //=============================================================================
1529 
#ifndef PRODUCT
// debug-only textual form of the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a single brk #0 instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1544 
1545 //=============================================================================
1546 
#ifndef PRODUCT
  // debug-only textual form of the nop-padding node
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as alignment padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1563 
1564 //=============================================================================
// the constant table base produces no value in a register, so it
// writes no registers
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// no post-register-allocation expansion is needed on this platform
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

// emits nothing, so occupies no bytes
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1589 
#ifndef PRODUCT
// debug-only textual form of the method prolog; mirrors the two frame
// set-up sequences chosen by size in MachPrologNode::emit
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: single immediate sub plus stp of fp/lr
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: frame size does not fit an immediate, go via
    // rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1611 
// Emit the method prolog: optional stack bang, frame construction,
// simulator notification, and constant-table base offset set-up.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the frame becomes complete for the OopMap machinery
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1647 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// number of relocation entries needed by the prolog: none
int MachPrologNode::reloc() const
{
  return 0;
}
1658 
1659 //=============================================================================
1660 
#ifndef PRODUCT
// debug-only textual form of the method epilog; mirrors the frame
// tear-down sequences chosen by size in MachEpilogNode::emit
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: reload fp/lr then a single immediate add
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: frame size does not fit an immediate, go via
    // rscratch1
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1686 
// Emit the method epilog: frame removal, simulator notification,
// reserved-stack check and the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    // safepoint poll at method return
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1706 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// use the generic pipeline description for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1728 
1729 //=============================================================================
1730 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register number onto its register class by range.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots each (V<n>, V<n>_H, V<n>_J,
  // V<n>_K -- see vectorx_reg above), hence 128 float OptoRegs
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1758 
1759 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1760   Compile* C = ra_->C;
1761 
1762   // Get registers to move.
1763   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1764   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1765   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1766   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1767 
1768   enum RC src_hi_rc = rc_class(src_hi);
1769   enum RC src_lo_rc = rc_class(src_lo);
1770   enum RC dst_hi_rc = rc_class(dst_hi);
1771   enum RC dst_lo_rc = rc_class(dst_lo);
1772 
1773   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1774 
1775   if (src_hi != OptoReg::Bad) {
1776     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1777            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1778            "expected aligned-adjacent pairs");
1779   }
1780 
1781   if (src_lo == dst_lo && src_hi == dst_hi) {
1782     return 0;            // Self copy, no move.
1783   }
1784 
1785   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1786               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1787   int src_offset = ra_->reg2offset(src_lo);
1788   int dst_offset = ra_->reg2offset(dst_lo);
1789 
1790   if (bottom_type()->isa_vect() != NULL) {
1791     uint ireg = ideal_reg();
1792     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1793     if (cbuf) {
1794       MacroAssembler _masm(cbuf);
1795       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1796       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1797         // stack->stack
1798         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1799         if (ireg == Op_VecD) {
1800           __ unspill(rscratch1, true, src_offset);
1801           __ spill(rscratch1, true, dst_offset);
1802         } else {
1803           __ spill_copy128(src_offset, dst_offset);
1804         }
1805       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1806         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1807                ireg == Op_VecD ? __ T8B : __ T16B,
1808                as_FloatRegister(Matcher::_regEncode[src_lo]));
1809       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1810         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1811                        ireg == Op_VecD ? __ D : __ Q,
1812                        ra_->reg2offset(dst_lo));
1813       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1814         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1815                        ireg == Op_VecD ? __ D : __ Q,
1816                        ra_->reg2offset(src_lo));
1817       } else {
1818         ShouldNotReachHere();
1819       }
1820     }
1821   } else if (cbuf) {
1822     MacroAssembler _masm(cbuf);
1823     switch (src_lo_rc) {
1824     case rc_int:
1825       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1826         if (is64) {
1827             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1828                    as_Register(Matcher::_regEncode[src_lo]));
1829         } else {
1830             MacroAssembler _masm(cbuf);
1831             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1832                     as_Register(Matcher::_regEncode[src_lo]));
1833         }
1834       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1835         if (is64) {
1836             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1837                      as_Register(Matcher::_regEncode[src_lo]));
1838         } else {
1839             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1840                      as_Register(Matcher::_regEncode[src_lo]));
1841         }
1842       } else {                    // gpr --> stack spill
1843         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1844         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1845       }
1846       break;
1847     case rc_float:
1848       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1849         if (is64) {
1850             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1851                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1852         } else {
1853             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1854                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1855         }
1856       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1857           if (cbuf) {
1858             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1859                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1860         } else {
1861             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1862                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1863         }
1864       } else {                    // fpr --> stack spill
1865         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1866         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1867                  is64 ? __ D : __ S, dst_offset);
1868       }
1869       break;
1870     case rc_stack:
1871       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1872         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1873       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1874         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1875                    is64 ? __ D : __ S, src_offset);
1876       } else {                    // stack --> stack copy
1877         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1878         __ unspill(rscratch1, is64, src_offset);
1879         __ spill(rscratch1, is64, dst_offset);
1880       }
1881       break;
1882     default:
1883       assert(false, "bad rc_class for spill");
1884       ShouldNotReachHere();
1885     }
1886   }
1887 
1888   if (st) {
1889     st->print("spill ");
1890     if (src_lo_rc == rc_stack) {
1891       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1892     } else {
1893       st->print("%s -> ", Matcher::regName[src_lo]);
1894     }
1895     if (dst_lo_rc == rc_stack) {
1896       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1897     } else {
1898       st->print("%s", Matcher::regName[dst_lo]);
1899     }
1900     if (bottom_type()->isa_vect() != NULL) {
1901       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1902     } else {
1903       st->print("\t# spill size = %d", is64 ? 64:32);
1904     }
1905   }
1906 
1907   return 0;
1908 
1909 }
1910 
1911 #ifndef PRODUCT
1912 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1913   if (!ra_)
1914     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
1915   else
1916     implementation(NULL, ra_, false, st);
1917 }
1918 #endif
1919 
// Emit the spill/fill/copy instructions; the shared implementation()
// routine does all the work.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1923 
// Size in bytes of the emitted code; defer to the generic MachNode
// machinery rather than duplicating the case analysis above.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1927 
1928 //=============================================================================
1929 
1930 #ifndef PRODUCT
1931 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1932   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1933   int reg = ra_->get_reg_first(this);
1934   st->print("add %s, rsp, #%d]\t# box lock",
1935             Matcher::regName[reg], offset);
1936 }
1937 #endif
1938 
// Materialize the stack address of the box-lock slot into the destination
// register: reg = sp + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // Frame offsets are expected always to fit an add immediate;
    // size() below assumes a single 4-byte instruction.
    ShouldNotReachHere();
  }
}
1951 
// emit() above always produces exactly one 4-byte add instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
1956 
1957 //=============================================================================
1958 
1959 #ifndef PRODUCT
1960 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1961 {
1962   st->print_cr("# MachUEPNode");
1963   if (UseCompressedClassPointers) {
1964     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1965     if (Universe::narrow_klass_shift() != 0) {
1966       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
1967     }
1968   } else {
1969    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1970   }
1971   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
1972   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
1973 }
1974 #endif
1975 
// Emit the unverified entry point: compare the receiver's klass with the
// inline-cache klass and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // cmp_klass(receiver, expected_klass, tmp) — rscratch2 is presumably
  // loaded with the IC klass by the call-site protocol; confirm there.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1989 
// Size in bytes of the unverified entry point; computed generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1994 
1995 // REQUIRED EMIT CODE
1996 
1997 //=============================================================================
1998 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache full).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // Stub allocation failed; record the failure and bail out.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2018 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache full).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    // Stub allocation failed; record the failure and bail out.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Materialize this pc in lr so the unpack blob knows where we came from.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2039 
2040 // REQUIRED MATCHER CODE
2041 
2042 //=============================================================================
2043 
2044 const bool Matcher::match_rule_supported(int opcode) {
2045 
2046   switch (opcode) {
2047   default:
2048     break;
2049   }
2050 
2051   if (!has_match_rule(opcode)) {
2052     return false;
2053   }
2054 
2055   return true;  // Per default match rules are supported.
2056 }
2057 
2058 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
2059 
2060   // TODO
2061   // identify extra cases that we might want to provide match rules for
2062   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
2063   bool ret_value = match_rule_supported(opcode);
2064   // Add rules here.
2065 
2066   return ret_value;  // Per default match rules are supported.
2067 }
2068 
// No predicated (masked) vector support on this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
2072 
// Use the register allocator's default float-pressure threshold unchanged.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
2076 
// Not used on AArch64 (x87-style FPU stack offsets have no equivalent).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
2082 
2083 // Is this branch offset short enough that a short branch can be used?
2084 //
2085 // NOTE: If the platform does not provide any short branch variants, then
2086 //       this method should return false for offset 0.
2087 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
2088   // The passed offset is relative to address of the branch.
2089 
2090   return (-32768 <= offset && offset < 32768);
2091 }
2092 
// Any 64-bit constant is cheap enough to store directly on this port.
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
2098 
// true just means we have fast l2f conversion
// (scvtf handles long -> float directly).
const bool Matcher::convL2FSupported(void) {
  return true;
}
2103 
2104 // Vector width in bytes.
2105 const int Matcher::vector_width_in_bytes(BasicType bt) {
2106   int size = MIN2(16,(int)MaxVectorSize);
2107   // Minimum 2 values in vector
2108   if (size < 2*type2aelembytes(bt)) size = 0;
2109   // But never < 4
2110   if (size < 4) size = 0;
2111   return size;
2112 }
2113 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = vector width / element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
2118 const int Matcher::min_vector_size(const BasicType bt) {
2119 //  For the moment limit the vector size to 8 bytes
2120     int size = 8 / type2aelembytes(bt);
2121     if (size < 2) size = 2;
2122     return size;
2123 }
2124 
2125 // Vector ideal reg.
2126 const uint Matcher::vector_ideal_reg(int len) {
2127   switch(len) {
2128     case  8: return Op_VecD;
2129     case 16: return Op_VecX;
2130   }
2131   ShouldNotReachHere();
2132   return 0;
2133 }
2134 
2135 const uint Matcher::vector_shift_count_ideal_reg(int size) {
2136   switch(size) {
2137     case  8: return Op_VecD;
2138     case 16: return Op_VecX;
2139   }
2140   ShouldNotReachHere();
2141   return 0;
2142 }
2143 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
2148 
// AArch64 supports misaligned vector store/load (the stale reference to
// x86 was a copy-paste leftover).
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2153 
// false => size gets scaled to BytesPerLong, ok.
// (i.e. the array-init count operand is measured in long words, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
2156 
// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  // (csel operates on 64-bit registers directly).
  return 0;
}
2162 
// Extra cost of a floating-point conditional move; none on this port.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2167 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (A64 variable shifts use only the low bits of the count register.)
const bool Matcher::need_masked_shift_count = false;
2174 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only usable when decoding needs no shift.
  return Universe::narrow_oop_shift() == 0;
}
2188 
// Same question as above, for narrow klass decoding.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2194 
// Should constant oops be materialized as ConN+DecodeN rather than ConP?
bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}
2199 
// Should constant klasses be materialized as ConNKlass+DecodeNKlass?
bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
2204 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// AArch64 handles such accesses directly, so no fixup is required.
const bool Matcher::misaligned_doubles_ok = true;
2217 
// Not used on AArch64 (the old "No-op on amd64" comment was a copy-paste
// leftover; this port never calls it, hence Unimplemented()).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2222 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  (Not needed on this port.)
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2236 
2237 // Return whether or not this register is ever used as an argument.
2238 // This function is used on startup to build the trampoline stubs in
2239 // generateOptoStub.  Registers not mentioned will be killed by the VM
2240 // call in the trampoline, and arguments in those registers not be
2241 // available to the callee.
2242 bool Matcher::can_be_java_arg(int reg)
2243 {
2244   return
2245     reg ==  R0_num || reg == R0_H_num ||
2246     reg ==  R1_num || reg == R1_H_num ||
2247     reg ==  R2_num || reg == R2_H_num ||
2248     reg ==  R3_num || reg == R3_H_num ||
2249     reg ==  R4_num || reg == R4_H_num ||
2250     reg ==  R5_num || reg == R5_H_num ||
2251     reg ==  R6_num || reg == R6_H_num ||
2252     reg ==  R7_num || reg == R7_H_num ||
2253     reg ==  V0_num || reg == V0_H_num ||
2254     reg ==  V1_num || reg == V1_H_num ||
2255     reg ==  V2_num || reg == V2_H_num ||
2256     reg ==  V3_num || reg == V3_H_num ||
2257     reg ==  V4_num || reg == V4_H_num ||
2258     reg ==  V5_num || reg == V5_H_num ||
2259     reg ==  V6_num || reg == V6_H_num ||
2260     reg ==  V7_num || reg == V7_H_num;
2261 }
2262 
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2267 
// No hand-written assembly path for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2271 
// Register for DIVI projection of divmodI.
// Never called on this port (no combined divmod match rules).
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2276 
// Register for MODI projection of divmodI.
// Never called on this port (no combined divmod match rules).
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2282 
// Register for DIVL projection of divmodL.
// Never called on this port (no combined divmod match rules).
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2288 
// Register for MODL projection of divmodL.
// Never called on this port (no combined divmod match rules).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
2294 
// SP is saved in FP across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2298 
2299 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2300   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2301     Node* u = addp->fast_out(i);
2302     if (u->is_Mem()) {
2303       int opsize = u->as_Mem()->memory_size();
2304       assert(opsize > 0, "unexpected memory operand size");
2305       if (u->as_Mem()->memory_size() != (1<<shift)) {
2306         return false;
2307       }
2308     }
2309   }
2310   return true;
2311 }
2312 
// ConvI2L nodes are not required to carry an explicit type on this port.
const bool Matcher::convi2l_type_required = false;
2314 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base + constant offset: handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: base + ((int-or-long index) << constant scale) — clone the
  // shift (and any feeding ConvI2L) into the address expression.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    // NOTE(review): test_set here vs plain set() for off/conv above —
    // presumably equivalent since m was not previously visited; confirm.
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: base + ConvI2L(index) — clone the conversion.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
2355 
// No platform-specific address reshaping needed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
2358 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

// Counts the arguments of *tf into gpcnt/fpcnt and classifies its return
// type into a MacroAssembler::ret_type_* code (written to rtype).
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so float/double args also increment
      // gps (gps ends up counting *all* args). Possibly deliberate for
      // the simulator's blrt accounting — confirm before changing.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  // Note: default deliberately sits in the middle of this switch — every
  // non-void, non-float return classifies as integral.
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2400 
// Emit a volatile memory access REG <-> [BASE] via INSN (presumably an
// acquire/release form such as ldar/stlr — confirm at the use sites).
// Volatile accesses support only a bare base register, hence the
// guarantees rejecting index/scale/displacement.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2409 
// Pointer-to-member-function types for the MacroAssembler load/store
// emitters dispatched by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2414 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // 32-bit index: sign-extend it in the address computation.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: simple base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2445 
  // Float-register variant of loadStore() above.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      // NOTE(review): unlike the integer variant, INDINDEXI2L and
      // INDINDEXI2LN are not handled here — presumably float accesses
      // never see those addressing opcodes; confirm.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
2468 
  // Vector-register variant: T selects the SIMD register width (S/D/Q).
  // Always scales the index with lsl (no sign-extended index cases here).
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
2480 
2481 %}
2482 
2483 
2484 
2485 //----------ENCODING BLOCK-----------------------------------------------------
2486 // This block specifies the encoding classes used by the compiler to
2487 // output byte streams.  Encoding classes are parameterized macros
2488 // used by Machine Instruction Nodes in order to generate the bit
2489 // encoding of the instruction.  Operands specify their base encoding
2490 // interface with the interface keyword.  There are currently
2491 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2492 // COND_INTER.  REG_INTER causes an operand to generate a function
2493 // which returns its register number when queried.  CONST_INTER causes
2494 // an operand to generate a function which returns the value of the
2495 // constant when queried.  MEMORY_INTER causes an operand to generate
2496 // four functions which return the Base Register, the Index Register,
2497 // the Scale Value, and the Offset Value of the operand when queried.
2498 // COND_INTER causes an operand to generate six functions which return
2499 // the encoding code (ie - encoding bits for the instruction)
2500 // associated with each basic boolean condition for a conditional
2501 // instruction.
2502 //
2503 // Instructions specify two basic values for encoding.  Again, a
2504 // function is available to check if the constant displacement is an
2505 // oop. They use the ins_encode keyword to specify their encoding
2506 // classes (which must be a sequence of enc_class names, and their
2507 // parameters, specified in the encoding block), and they use the
2508 // opcode keyword to specify, in order, their primary, secondary, and
2509 // tertiary opcode.  Only the opcode sections which a particular
2510 // instruction needs for encoding need to be specified.
2511 encode %{
2512   // Build emit functions for each basic byte or larger field in the
2513   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2514   // from C++ code in the enc_class source block.  Emit functions will
2515   // live in the main source block for now.  In future, we can
2516   // generalize this by adding a syntax that specifies the sizes of
2517   // fields in an order, so that the adlc can build the emit functions
2518   // automagically
2519 
2520   // catch all for unimplemented encodings
  // Emits a guaranteed runtime failure; placeholder for missing encodings.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2525 
  // BEGIN Non-volatile memory access

  // ldrsbw: load byte, sign-extended (32-bit destination).
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsb: load byte, sign-extended.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load byte, zero-extended (int destination).
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load byte, zero-extended (long destination).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2551 
  // ldrshw: load halfword, sign-extended (32-bit destination).
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsh: load halfword, sign-extended.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load halfword, zero-extended (int destination).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load halfword, zero-extended (long destination).
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2575 
  // ldrw: load 32-bit word (int destination).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word, zero-extended into a long destination.
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsw: load 32-bit word, sign-extended to 64 bits.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldr: load 64-bit doubleword.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2599 
2600   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
2601     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2602     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
2603                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2604   %}
2605 
2606   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
2607     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2608     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
2609                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2610   %}
2611 
2612   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
2613     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2614     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
2615        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2616   %}
2617 
2618   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
2619     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2620     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
2621        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2622   %}
2623 
2624   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
2625     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2626     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
2627        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
2628   %}
2629 
  // Non-volatile store encodings, mirroring the loads above.  The
  // *0 variants store the zero register (zr) directly, avoiding the
  // need to materialize a zero constant.

  // store byte
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte preceded by a StoreStore barrier, so earlier
  // stores are ordered before this one
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store halfword
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero halfword
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 32-bit word
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit doubleword; str cannot encode sp (r31) as a source,
  // so an sp store is routed through rscratch2 first
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 64-bit doubleword
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit float from an FP/SIMD register
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit double from an FP/SIMD register
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector stores: S (32-bit), D (64-bit) and Q (128-bit) lane widths

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2722 
2723   // END Non-volatile memory access
2724 
2725   // volatile loads and stores
2726 
  // Volatile accesses use AArch64's load-acquire (ldar*) and
  // store-release (stlr*) instructions via the MOV_VOLATILE macro
  // (defined earlier in this file), which forms the effective address
  // in the supplied scratch register when the addressing mode requires
  // it.  NOTE(review): MOV_VOLATILE is assumed to declare the local
  // _masm that the bare __ calls below rely on -- confirm against the
  // macro definition.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release 32-bit word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // load-acquire byte then sign-extend to 32 bits (ldarb only
  // zero-extends, so the extension is done separately)
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte then sign-extend to 64 bits
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte, zero-extended (int destination)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire byte, zero-extended (long destination)
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword then sign-extend to 32 bits
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword then sign-extend to 64 bits
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword, zero-extended (int destination)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire halfword, zero-extended (long destination)
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word (int destination)
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // NOTE(review): duplicate name, iRegL variant; identical emitted code
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // load-acquire float: acquire into rscratch1 as an integer, then
  // move the bits into the FP register
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire double, via rscratch1 as for fldars
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2817 
2818   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
2819     Register src_reg = as_Register($src$$reg);
2820     // we sometimes get asked to store the stack pointer into the
2821     // current thread -- we cannot do that directly on AArch64
2822     if (src_reg == r31_sp) {
2823         MacroAssembler _masm(&cbuf);
2824       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
2825       __ mov(rscratch2, sp);
2826       src_reg = rscratch2;
2827     }
2828     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
2829                  rscratch1, stlr);
2830   %}
2831 
  // store-release float: move the FP bits into rscratch2 (inside a
  // scope so the temporary _masm is destroyed before MOV_VOLATILE
  // creates its own), then store-release the integer register
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double, via rscratch2 as for fstlrs
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
2851 
2852   // synchronized read/update encodings
2853 
2854   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2855     MacroAssembler _masm(&cbuf);
2856     Register dst_reg = as_Register($dst$$reg);
2857     Register base = as_Register($mem$$base);
2858     int index = $mem$$index;
2859     int scale = $mem$$scale;
2860     int disp = $mem$$disp;
2861     if (index == -1) {
2862        if (disp != 0) {
2863         __ lea(rscratch1, Address(base, disp));
2864         __ ldaxr(dst_reg, rscratch1);
2865       } else {
2866         // TODO
2867         // should we ever get anything other than this case?
2868         __ ldaxr(dst_reg, base);
2869       }
2870     } else {
2871       Register index_reg = as_Register(index);
2872       if (disp == 0) {
2873         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2874         __ ldaxr(dst_reg, rscratch1);
2875       } else {
2876         __ lea(rscratch1, Address(base, disp));
2877         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2878         __ ldaxr(dst_reg, rscratch1);
2879       }
2880     }
2881   %}
2882 
2883   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2884     MacroAssembler _masm(&cbuf);
2885     Register src_reg = as_Register($src$$reg);
2886     Register base = as_Register($mem$$base);
2887     int index = $mem$$index;
2888     int scale = $mem$$scale;
2889     int disp = $mem$$disp;
2890     if (index == -1) {
2891        if (disp != 0) {
2892         __ lea(rscratch2, Address(base, disp));
2893         __ stlxr(rscratch1, src_reg, rscratch2);
2894       } else {
2895         // TODO
2896         // should we ever get anything other than this case?
2897         __ stlxr(rscratch1, src_reg, base);
2898       }
2899     } else {
2900       Register index_reg = as_Register(index);
2901       if (disp == 0) {
2902         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
2903         __ stlxr(rscratch1, src_reg, rscratch2);
2904       } else {
2905         __ lea(rscratch2, Address(base, disp));
2906         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
2907         __ stlxr(rscratch1, src_reg, rscratch2);
2908       }
2909     }
2910     __ cmpw(rscratch1, zr);
2911   %}
2912 
  // Compare-and-exchange encodings without acquire semantics, one per
  // operand width.  All delegate to MacroAssembler::cmpxchg with
  // release-only ordering; the addressing mode must be a bare base
  // register (enforced by the guarantee).

  // 64-bit CAS
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2944 
2945 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.

  // 64-bit CAS with acquire
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit CAS with acquire
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit CAS with acquire
  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit CAS with acquire
  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
2981 
  // auxiliary used for CompareAndSwapX to set result register
  // (res = 1 if the preceding comparison set EQ, else 0)
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
2988 
  // prefetch encodings

  // prefetch for store (PSTL1KEEP: prefetch into L1, temporal).
  // Address formation mirrors ldaxr: index/displacement combinations
  // that prfm cannot encode directly are folded via lea into rscratch1.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3009 
  // mov encodings

  // move a 32-bit immediate into an int register; zero gets the
  // cheaper move-from-zr form
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // move a 64-bit immediate into a long register
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}

  // move a pointer constant, using the relocation type to pick the
  // right materialization (oop, metadata, or raw address).  NULL and
  // the value 1 are handled by dedicated encodings below, so they are
  // unreachable here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // addresses below the first page are small enough for a plain
        // mov; anything else is built with adrp + add
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}

  // pointer constant NULL
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // pointer constant 1
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}

  // materialize the polling page address with a poll relocation;
  // the page is assumed page-aligned so adrp leaves no residual offset
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // materialize the card-table byte map base
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}

  // narrow (compressed) oop constant
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // narrow oop constant NULL
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // narrow (compressed) klass constant
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3116 
  // arithmetic encodings

  // add/subtract a 32-bit immediate.  The same encoding serves both
  // AddI and SubI: $primary distinguishes them, and a negated or
  // negative constant is emitted with the opposite instruction so the
  // immediate field always receives a non-negative value.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above.  NOTE(review): the constant is
  // narrowed to int32_t; immLAddSub is presumably restricted to a
  // 12-bit add/sub immediate range -- confirm against the operand
  // definition.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3146 
3147   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3148     MacroAssembler _masm(&cbuf);
3149    Register dst_reg = as_Register($dst$$reg);
3150    Register src1_reg = as_Register($src1$$reg);
3151    Register src2_reg = as_Register($src2$$reg);
3152     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3153   %}
3154 
3155   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3156     MacroAssembler _masm(&cbuf);
3157    Register dst_reg = as_Register($dst$$reg);
3158    Register src1_reg = as_Register($src1$$reg);
3159    Register src2_reg = as_Register($src2$$reg);
3160     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3161   %}
3162 
3163   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3164     MacroAssembler _masm(&cbuf);
3165    Register dst_reg = as_Register($dst$$reg);
3166    Register src1_reg = as_Register($src1$$reg);
3167    Register src2_reg = as_Register($src2$$reg);
3168     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3169   %}
3170 
3171   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3172     MacroAssembler _masm(&cbuf);
3173    Register dst_reg = as_Register($dst$$reg);
3174    Register src1_reg = as_Register($src1$$reg);
3175    Register src2_reg = as_Register($src2$$reg);
3176     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3177   %}
3178 
  // compare instruction encodings

  // 32-bit register-register compare
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-range immediate; a negative
  // constant is folded into the opposite flag-setting instruction
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate: materialize it in
  // rscratch1 first
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit immediate; val == -val detects
  // Long.MIN_VALUE, which cannot be negated
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate, via rscratch1
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // pointer compare
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // narrow-oop compare (32-bit)
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // test pointer against NULL
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // test narrow oop against NULL
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
3262 
  // unconditional branch to a label
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // conditional branch; the condition code comes from the cmpOp operand
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // unsigned-comparison variant; the cmpOpU operand supplies unsigned
  // condition codes, the emission is otherwise identical to br_con
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // slow-path subtype check; on the hit path ($primary) the result
  // register is cleared to signal success, the miss path falls through
  // with result_reg untouched by that clear
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3298 
3299   enc_class aarch64_enc_java_static_call(method meth) %{
3300     MacroAssembler _masm(&cbuf);
3301 
3302     address addr = (address)$meth$$method;
3303     address call;
3304     if (!_method) {
3305       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
3306       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
3307     } else {
3308       int method_index = resolved_method_index(cbuf);
3309       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
3310                                                   : static_call_Relocation::spec(method_index);
3311       call = __ trampoline_call(Address(addr, rspec), &cbuf);
3312 
3313       // Emit stub for static call
3314       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
3315       if (stub == NULL) {
3316         ciEnv::current()->record_failure("CodeCache is full");
3317         return;
3318       }
3319     }
3320     if (call == NULL) {
3321       ciEnv::current()->record_failure("CodeCache is full");
3322       return;
3323     }
3324   %}
3325 
3326   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3327     MacroAssembler _masm(&cbuf);
3328     int method_index = resolved_method_index(cbuf);
3329     address call = __ ic_call((address)$meth$$method, method_index);
3330     if (call == NULL) {
3331       ciEnv::current()->record_failure("CodeCache is full");
3332       return;
3333     }
3334   %}
3335 
3336   enc_class aarch64_enc_call_epilog() %{
3337     MacroAssembler _masm(&cbuf);
3338     if (VerifyStackAtCalls) {
3339       // Check that stack depth is unchanged: find majik cookie on stack
3340       __ call_Unimplemented();
3341     }
3342   %}
3343 
  // Call from compiled Java code into the VM runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: emit a (trampolined) direct call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        // No room left to emit the trampoline stub.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: indirect call through rscratch1,
      // with argument-count/return-type info derived from the TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the two-slot breadcrumb pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3374 
  // Jump (not call) to the exception rethrow stub.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3379 
  // Plain method return through the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3384 
  // Tail call: transfer control to the address in jump_target without
  // creating a new frame.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}
3390 
  // Tail jump used for exception forwarding: passes the return address
  // to the callee in r3 before branching.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3400 
  // Fast-path monitor enter for C2's FastLock node.
  // Inputs: object = oop being locked, box = the on-stack BasicLock,
  // tmp/tmp2 = temporaries.  On exit the condition flags encode the
  // result: EQ = lock acquired, NE = the slow path must be taken
  // (see trailing comments at label 'cont').
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      // May branch straight to 'cont' when the bias is already owned.
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    // we can use AArch64's bit test and branch here but
    // markoopDesc does not define a bit index just the bit value
    // so assert in case the bit pos changes
#   define __monitor_value_log2 1
    assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
    __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#   undef __monitor_value_log2

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single CASAL; cmp sets EQ on success.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // LL/SC path: load-acquire exclusive / store-release exclusive loop.
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // Store-exclusive status is 0 on success.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    // Mask of bits that must be clear for (mark - sp) when the mark is a
    // stack-lock owned by this thread (within one page, lock bits clear).
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
    __ mov(disp_hdr, zr);

    if (UseLSE) {
      __ mov(rscratch1, disp_hdr);
      __ casal(Assembler::xword, rscratch1, rthread, tmp);
      // EQ iff the owner field was NULL and is now rthread.
      __ cmp(rscratch1, disp_hdr);
    } else {
      Label retry_load, fail;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) {
        __ prfm(Address(tmp), PSTL1STRM);
      }
      __ bind(retry_load);
      __ ldaxr(rscratch1, tmp);
      __ cmp(disp_hdr, rscratch1);
      __ br(Assembler::NE, fail);
      // use stlxr to ensure update is immediately visible
      __ stlxr(rscratch1, rthread, tmp);
      __ cbnzw(rscratch1, retry_load);
      __ bind(fail);
    }

    // Label next;
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/rthread,
    //               /*addr=*/tmp,
    //               /*tmp=*/rscratch1,
    //               /*succeed*/next,
    //               /*fail*/NULL);
    // __ bind(next);

    // store a non-null value into the box.
    __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // PPC port checks the following invariants
    // #ifdef ASSERT
    // bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
    //                        "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
    // #endif

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3546 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit for C2's FastUnlock node.  Mirrors
  // aarch64_enc_fast_lock above: handles biased, recursive, stack-locked
  // and inflated cases; flags encode the result (EQ = unlocked,
  // NE = take the slow path).
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
    __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE path: restore the displaced header with a single CAS.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // LL/SC path: exchange box -> displaced header in the mark word.
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // Store-exclusive status is 0 on success.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    __ b(cont);

    __ bind(object_has_monitor);
    __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
    __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
    __ cmp(rscratch1, zr);
    __ br(Assembler::NE, cont);

    // Fast exit is only possible when there are no waiters queued.
    __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
    __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
    __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
    __ cmp(rscratch1, zr);
    __ cbnz(rscratch1, cont);
    // need a release store here
    __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
    __ stlr(rscratch1, tmp); // rscratch1 is zero

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3635 
3636 %}
3637 
3638 //----------FRAME--------------------------------------------------------------
3639 // Definition of frame structure and management information.
3640 //
3641 //  S T A C K   L A Y O U T    Allocators stack-slot number
3642 //                             |   (to get allocators register number
3643 //  G  Owned by    |        |  v    add OptoReg::stack0())
3644 //  r   CALLER     |        |
3645 //  o     |        +--------+      pad to even-align allocators stack-slot
3646 //  w     V        |  pad0  |        numbers; owned by CALLER
3647 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3648 //  h     ^        |   in   |  5
3649 //        |        |  args  |  4   Holes in incoming args owned by SELF
3650 //  |     |        |        |  3
3651 //  |     |        +--------+
3652 //  V     |        | old out|      Empty on Intel, window on Sparc
3653 //        |    old |preserve|      Must be even aligned.
3654 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3655 //        |        |   in   |  3   area for Intel ret address
3656 //     Owned by    |preserve|      Empty on Sparc.
3657 //       SELF      +--------+
3658 //        |        |  pad2  |  2   pad to align old SP
3659 //        |        +--------+  1
3660 //        |        | locks  |  0
3661 //        |        +--------+----> OptoReg::stack0(), even aligned
3662 //        |        |  pad1  | 11   pad to align new SP
3663 //        |        +--------+
3664 //        |        |        | 10
3665 //        |        | spills |  9   spills
3666 //        V        |        |  8   (pad0 slot for callee)
3667 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3668 //        ^        |  out   |  7
3669 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3670 //     Owned by    +--------+
3671 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3672 //        |    new |preserve|      Must be even-aligned.
3673 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3674 //        |        |        |
3675 //
3676 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3677 //         known from SELF's arguments and the Java calling convention.
3678 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3686 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3687 //         even aligned with pad0 as needed.
3688 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3689 //           (the latter is true on Intel but is it false on AArch64?)
3690 //         region 6-11 is even aligned; it may be padded out more so that
3691 //         the region from SP to FP meets the minimum stack alignment.
3692 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3693 //         alignment.  Region 11, pad1, may be dynamically extended so that
3694 //         SP meets the minimum alignment.
3695 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 here names the register-file entry defined earlier
  // in this file — presumably SP on AArch64; confirm against the
  // register definitions section.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Tables are indexed by the ideal register opcode; lo/hi give the
    // register pair for the low and high halves of the value.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3799 
//----------ATTRIBUTES---------------------------------------------------------
// Cost values below are relative weights used during instruction
// selection, not machine cycles.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
3817 
3818 //----------OPERANDS-----------------------------------------------------------
3819 // Operand definitions must precede instruction definitions for correct parsing
3820 // in the ADLC because operands constitute user defined types which are used in
3821 // instruction definitions.
3822 
3823 //----------Simple Operands----------------------------------------------------
3824 
// Integer operands 32 bit
// 32 bit immediate (any value — no predicate)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3868 
// Shift values for add/sub extension shift: 0..4 inclusive
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant <= 4 (note: no lower bound in the predicate)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The 32 bit constant 31 (used by match rules elsewhere in this file,
// e.g. as an int shift-amount mask)
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3899 
// Specific small constants (byte-boundary shift amounts and low-bit
// masks) matched by individual rules elsewhere in this file.

operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Low 8-bit mask (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Low 16-bit mask (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3999 
// Long (64-bit) mask constants.

// Low 8-bit mask as a long (0xFF)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Low 16-bit mask as a long (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Low 32-bit mask as a long (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long contiguous low-bit mask: value is 2^k - 1 with the top two bits
// clear (the predicate requires value+1 to be a power of two).
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int contiguous low-bit mask: value is 2^k - 1 with the top two bits
// clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4051 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4105 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (second argument is presumably the
// log2 of the access size -- see Address::offset_ok_for_immed)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4186 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4208 
// Integer operands 64 bit
// 64 bit immediate (any value — no predicate)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4295 
// Pointer operands
// Pointer Immediate (any value — no predicate)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4377 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// bit-pattern test, so this matches +0.0d only (-0.0d has the sign bit set)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value encodable as a floating-point immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// bit-pattern test, so this matches +0.0f only (-0.0f has the sign bit set)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value encodable as a floating-point immediate
// (widened to double for the validity check)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4438 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4469 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4503 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  op_cost(0);  // explicit zero cost, consistent with all sibling register operands
  format %{ %}
  interface(REG_INTER);
%}
4513 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4546 
// Fixed-register pointer/long operands: each allocates in a
// single-register class so the matcher can pin a value to a specific
// (typically ABI-mandated) register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4674 
// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register 32-bit integer operands (int_rN_reg classes).

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

4731 
// Narrow (compressed) Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4791 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4835 
// Fixed FP/SIMD register operands: pin a double value to a specific
// vector register (v0..v3).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4871 
4872 // Flags register, used as output of signed compare instructions
4873 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
4876 // that ordered inequality tests use GT, GE, LT or LE none of which
4877 // pass through cases where the result is unordered i.e. one or both
4878 // inputs to the compare is a NaN. this means that the ideal code can
4879 // replace e.g. a GT with an LE and not end up capturing the NaN case
4880 // (where the comparison should always fail). EQ and NE tests are
4881 // always generated in ideal code so that unordered folds into the NE
4882 // case, matching the behaviour of AArch64 NE.
4883 //
4884 // This differs from x86 where the outputs of FP compares use a
4885 // special FP flags registers and where compares based on this
4886 // register are distinguished into ordered inequalities (cmpOpUCF) and
4887 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
4888 // to explicitly handle the unordered case in branches. x86 also has
4889 // to include extra CMoveX rules to accept a cmpOpUCF input.
4890 
// Flags register operand for signed (and FP, see the note above the
// operand definitions) compares.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
4911 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4953 
//----------Memory Operands----------------------------------------------------
// NOTE(review): index(0xffffffff) appears to be the "no index register"
// sentinel used throughout these memory operands — confirm against the
// encoding routines before relying on it.

// plain register-indirect addressing: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + sign-extended-int index, scaled; only when the scaled form is
// encodable for every memory use of this address (see size_fits_all_mem_uses)
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index, scaled; same encodability predicate as above
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + sign-extended-int index, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + long index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + immediate offset; the indOffI4/8/16 and indOffL4/8/16 variants
// below differ only in the immediate-offset operand, which restricts the
// offset to one valid for the corresponding access size
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5139 
// Narrow-oop variants of the memory operands above. Each matches a
// DecodeN of a compressed pointer and is only valid when compressed
// oops use a zero shift (so decode is a simple base adjustment).
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5244 
5245 
5246 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// (address is thread register + fixed immL_pc_off displacement)
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5261 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the base(0x1e) "// RSP" labels below look inherited from the
// x86 AD file; 0x1e is this file's matcher encoding for the stack pointer —
// confirm against the register definitions before renaming.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5336 
5337 // Operands for expressing Control Flow
5338 // NOTE: Label is a predefined operand which should not be redefined in
5339 //       the AD file. It is generically handled within the ADLC.
5340 
5341 //----------Conditional Branch Operands----------------------------------------
5342 // Comparison Op  - This is the operation of the comparison, and is limited to
5343 //                  the following set of codes:
5344 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5345 //
5346 // Other attributes of the comparison, such as unsignedness, are specified
5347 // by the comparison instruction that sets a condition code flags register.
5348 // That result is represented by a flags operand whose subtype is appropriate
5349 // to the unsignedness (etc.) of the comparison.
5350 //
5351 // Later, the instruction which matches both the Comparison Op (a Bool) and
5352 // the flags (produced by the Cmp) specifies the coding of the comparison op
5353 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5354 
// used for signed integral comparisons and fp comparisons
// encodings are the AArch64 condition codes (eq=0x0, ne=0x1, lt=0xb,
// ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5373 
// used for unsigned integral comparisons
// uses the unsigned AArch64 condition codes (lo=0x3, hs=0x2, ls=0x9, hi=0x8)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5392 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts the Bool test to eq/ne)

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5416 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts the Bool test to lt/ge)

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5441 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// (predicate restricts the Bool test to eq/ne/lt/ge)

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5468 
5469 // Special operand allowing long args to int ops to be truncated for free
5470 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // trailing semicolon added for consistency with every other operand
  interface(REG_INTER);
%}
5481 
// vector memory operand classes, grouped by access size; the indOff
// members restrict immediate offsets to ones valid for that size
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5485 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5513 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
// by mapping A53-style stage names onto the generic S0..S5 stages below
//pipe_desc(ISS, EX1, EX2, WR);
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
5523 
5524 // Integer ALU reg operation
5525 pipeline %{
5526 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5539 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 / ALU are "either of" unions of the dual issue/ALU units
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
5554 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
5560 
5561 //----------PIPELINE CLASSES---------------------------------------------------
5562 // Pipeline Classes describe the stages in which input and output are
5563 // referenced by the hardware pipeline.
5564 
5565 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
5566 %{
5567   single_instruction;
5568   src1   : S1(read);
5569   src2   : S2(read);
5570   dst    : S5(write);
5571   INS01  : ISS;
5572   NEON_FP : S5;
5573 %}
5574 
5575 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
5576 %{
5577   single_instruction;
5578   src1   : S1(read);
5579   src2   : S2(read);
5580   dst    : S5(write);
5581   INS01  : ISS;
5582   NEON_FP : S5;
5583 %}
5584 
5585 pipe_class fp_uop_s(vRegF dst, vRegF src)
5586 %{
5587   single_instruction;
5588   src    : S1(read);
5589   dst    : S5(write);
5590   INS01  : ISS;
5591   NEON_FP : S5;
5592 %}
5593 
5594 pipe_class fp_uop_d(vRegD dst, vRegD src)
5595 %{
5596   single_instruction;
5597   src    : S1(read);
5598   dst    : S5(write);
5599   INS01  : ISS;
5600   NEON_FP : S5;
5601 %}
5602 
5603 pipe_class fp_d2f(vRegF dst, vRegD src)
5604 %{
5605   single_instruction;
5606   src    : S1(read);
5607   dst    : S5(write);
5608   INS01  : ISS;
5609   NEON_FP : S5;
5610 %}
5611 
5612 pipe_class fp_f2d(vRegD dst, vRegF src)
5613 %{
5614   single_instruction;
5615   src    : S1(read);
5616   dst    : S5(write);
5617   INS01  : ISS;
5618   NEON_FP : S5;
5619 %}
5620 
5621 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
5622 %{
5623   single_instruction;
5624   src    : S1(read);
5625   dst    : S5(write);
5626   INS01  : ISS;
5627   NEON_FP : S5;
5628 %}
5629 
5630 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
5631 %{
5632   single_instruction;
5633   src    : S1(read);
5634   dst    : S5(write);
5635   INS01  : ISS;
5636   NEON_FP : S5;
5637 %}
5638 
5639 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
5640 %{
5641   single_instruction;
5642   src    : S1(read);
5643   dst    : S5(write);
5644   INS01  : ISS;
5645   NEON_FP : S5;
5646 %}
5647 
5648 pipe_class fp_l2f(vRegF dst, iRegL src)
5649 %{
5650   single_instruction;
5651   src    : S1(read);
5652   dst    : S5(write);
5653   INS01  : ISS;
5654   NEON_FP : S5;
5655 %}
5656 
5657 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
5658 %{
5659   single_instruction;
5660   src    : S1(read);
5661   dst    : S5(write);
5662   INS01  : ISS;
5663   NEON_FP : S5;
5664 %}
5665 
5666 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
5667 %{
5668   single_instruction;
5669   src    : S1(read);
5670   dst    : S5(write);
5671   INS01  : ISS;
5672   NEON_FP : S5;
5673 %}
5674 
5675 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
5676 %{
5677   single_instruction;
5678   src    : S1(read);
5679   dst    : S5(write);
5680   INS01  : ISS;
5681   NEON_FP : S5;
5682 %}
5683 
5684 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
5685 %{
5686   single_instruction;
5687   src    : S1(read);
5688   dst    : S5(write);
5689   INS01  : ISS;
5690   NEON_FP : S5;
5691 %}
5692 
5693 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
5694 %{
5695   single_instruction;
5696   src1   : S1(read);
5697   src2   : S2(read);
5698   dst    : S5(write);
5699   INS0   : ISS;
5700   NEON_FP : S5;
5701 %}
5702 
5703 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
5704 %{
5705   single_instruction;
5706   src1   : S1(read);
5707   src2   : S2(read);
5708   dst    : S5(write);
5709   INS0   : ISS;
5710   NEON_FP : S5;
5711 %}
5712 
5713 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
5714 %{
5715   single_instruction;
5716   cr     : S1(read);
5717   src1   : S1(read);
5718   src2   : S1(read);
5719   dst    : S3(write);
5720   INS01  : ISS;
5721   NEON_FP : S3;
5722 %}
5723 
5724 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
5725 %{
5726   single_instruction;
5727   cr     : S1(read);
5728   src1   : S1(read);
5729   src2   : S1(read);
5730   dst    : S3(write);
5731   INS01  : ISS;
5732   NEON_FP : S3;
5733 %}
5734 
5735 pipe_class fp_imm_s(vRegF dst)
5736 %{
5737   single_instruction;
5738   dst    : S3(write);
5739   INS01  : ISS;
5740   NEON_FP : S3;
5741 %}
5742 
5743 pipe_class fp_imm_d(vRegD dst)
5744 %{
5745   single_instruction;
5746   dst    : S3(write);
5747   INS01  : ISS;
5748   NEON_FP : S3;
5749 %}
5750 
5751 pipe_class fp_load_constant_s(vRegF dst)
5752 %{
5753   single_instruction;
5754   dst    : S4(write);
5755   INS01  : ISS;
5756   NEON_FP : S4;
5757 %}
5758 
5759 pipe_class fp_load_constant_d(vRegD dst)
5760 %{
5761   single_instruction;
5762   dst    : S4(write);
5763   INS01  : ISS;
5764   NEON_FP : S4;
5765 %}
5766 
5767 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
5768 %{
5769   single_instruction;
5770   dst    : S5(write);
5771   src1   : S1(read);
5772   src2   : S1(read);
5773   INS01  : ISS;
5774   NEON_FP : S5;
5775 %}
5776 
5777 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
5778 %{
5779   single_instruction;
5780   dst    : S5(write);
5781   src1   : S1(read);
5782   src2   : S1(read);
5783   INS0   : ISS;
5784   NEON_FP : S5;
5785 %}
5786 
5787 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
5788 %{
5789   single_instruction;
5790   dst    : S5(write);
5791   src1   : S1(read);
5792   src2   : S1(read);
5793   dst    : S1(read);   // multiply-accumulate: dst is also an early input operand
5794   INS01  : ISS;
5795   NEON_FP : S5;
5796 %}
5797 
5798 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
5799 %{
5800   single_instruction;
5801   dst    : S5(write);
5802   src1   : S1(read);
5803   src2   : S1(read);
5804   dst    : S1(read);   // multiply-accumulate: dst is also an early input operand
5805   INS0   : ISS;
5806   NEON_FP : S5;
5807 %}
5808 
5809 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
5810 %{
5811   single_instruction;
5812   dst    : S4(write);
5813   src1   : S2(read);
5814   src2   : S2(read);
5815   INS01  : ISS;
5816   NEON_FP : S4;
5817 %}
5818 
5819 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
5820 %{
5821   single_instruction;
5822   dst    : S4(write);
5823   src1   : S2(read);
5824   src2   : S2(read);
5825   INS0   : ISS;
5826   NEON_FP : S4;
5827 %}
5828 
5829 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
5830 %{
5831   single_instruction;
5832   dst    : S3(write);
5833   src1   : S2(read);
5834   src2   : S2(read);
5835   INS01  : ISS;
5836   NEON_FP : S3;
5837 %}
5838 
5839 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
5840 %{
5841   single_instruction;
5842   dst    : S3(write);
5843   src1   : S2(read);
5844   src2   : S2(read);
5845   INS0   : ISS;
5846   NEON_FP : S3;
5847 %}
5848 
5849 pipe_class vshift64(vecD dst, vecD src, vecX shift)
5850 %{
5851   single_instruction;
5852   dst    : S3(write);
5853   src    : S1(read);
5854   shift  : S1(read);
5855   INS01  : ISS;
5856   NEON_FP : S3;
5857 %}
5858 
5859 pipe_class vshift128(vecX dst, vecX src, vecX shift)
5860 %{
5861   single_instruction;
5862   dst    : S3(write);
5863   src    : S1(read);
5864   shift  : S1(read);
5865   INS0   : ISS;
5866   NEON_FP : S3;
5867 %}
5868 
5869 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
5870 %{
5871   single_instruction;
5872   dst    : S3(write);
5873   src    : S1(read);
5874   INS01  : ISS;
5875   NEON_FP : S3;
5876 %}
5877 
5878 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
5879 %{
5880   single_instruction;
5881   dst    : S3(write);
5882   src    : S1(read);
5883   INS0   : ISS;
5884   NEON_FP : S3;
5885 %}
5886 
5887 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
5888 %{
5889   single_instruction;
5890   dst    : S5(write);
5891   src1   : S1(read);
5892   src2   : S1(read);
5893   INS01  : ISS;
5894   NEON_FP : S5;
5895 %}
5896 
5897 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
5898 %{
5899   single_instruction;
5900   dst    : S5(write);
5901   src1   : S1(read);
5902   src2   : S1(read);
5903   INS0   : ISS;
5904   NEON_FP : S5;
5905 %}
5906 
5907 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
5908 %{
5909   single_instruction;
5910   dst    : S5(write);
5911   src1   : S1(read);
5912   src2   : S1(read);
5913   INS0   : ISS;
5914   NEON_FP : S5;
5915 %}
5916 
5917 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
5918 %{
5919   single_instruction;
5920   dst    : S5(write);
5921   src1   : S1(read);
5922   src2   : S1(read);
5923   INS0   : ISS;
5924   NEON_FP : S5;
5925 %}
5926 
5927 pipe_class vsqrt_fp128(vecX dst, vecX src)
5928 %{
5929   single_instruction;
5930   dst    : S5(write);
5931   src    : S1(read);
5932   INS0   : ISS;
5933   NEON_FP : S5;
5934 %}
5935 
5936 pipe_class vunop_fp64(vecD dst, vecD src)
5937 %{
5938   single_instruction;
5939   dst    : S5(write);
5940   src    : S1(read);
5941   INS01  : ISS;
5942   NEON_FP : S5;
5943 %}
5944 
5945 pipe_class vunop_fp128(vecX dst, vecX src)
5946 %{
5947   single_instruction;
5948   dst    : S5(write);
5949   src    : S1(read);
5950   INS0   : ISS;
5951   NEON_FP : S5;
5952 %}
5953 
5954 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
5955 %{
5956   single_instruction;
5957   dst    : S3(write);
5958   src    : S1(read);
5959   INS01  : ISS;
5960   NEON_FP : S3;
5961 %}
5962 
5963 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
5964 %{
5965   single_instruction;
5966   dst    : S3(write);
5967   src    : S1(read);
5968   INS01  : ISS;
5969   NEON_FP : S3;
5970 %}
5971 
5972 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
5973 %{
5974   single_instruction;
5975   dst    : S3(write);
5976   src    : S1(read);
5977   INS01  : ISS;
5978   NEON_FP : S3;
5979 %}
5980 
5981 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
5982 %{
5983   single_instruction;
5984   dst    : S3(write);
5985   src    : S1(read);
5986   INS01  : ISS;
5987   NEON_FP : S3;
5988 %}
5989 
5990 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
5991 %{
5992   single_instruction;
5993   dst    : S3(write);
5994   src    : S1(read);
5995   INS01  : ISS;
5996   NEON_FP : S3;
5997 %}
5998 
5999 pipe_class vmovi_reg_imm64(vecD dst)
6000 %{
6001   single_instruction;
6002   dst    : S3(write);
6003   INS01  : ISS;
6004   NEON_FP : S3;
6005 %}
6006 
6007 pipe_class vmovi_reg_imm128(vecX dst)
6008 %{
6009   single_instruction;
6010   dst    : S3(write);
6011   INS0   : ISS;
6012   NEON_FP : S3;
6013 %}
6014 
6015 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
6016 %{
6017   single_instruction;
6018   dst    : S5(write);
6019   mem    : ISS(read);
6020   INS01  : ISS;
6021   NEON_FP : S3;
6022 %}
6023 
6024 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
6025 %{
6026   single_instruction;
6027   dst    : S5(write);
6028   mem    : ISS(read);
6029   INS01  : ISS;
6030   NEON_FP : S3;
6031 %}
6032 
6033 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
6034 %{
6035   single_instruction;
6036   mem    : ISS(read);
6037   src    : S2(read);
6038   INS01  : ISS;
6039   NEON_FP : S3;
6040 %}
6041 
6042 // Store a 128-bit vector register.
6043 // Fix: src was declared vecD (64-bit); the 128-bit class takes a vecX,
6044 // matching vload_reg_mem128 and the other *128 pipeline classes above.
6045 pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
6046 %{
6047   single_instruction;
6048   mem    : ISS(read);
6049   src    : S2(read);
6050   INS01  : ISS;
6051   NEON_FP : S3;
6052 %}
6050 
6051 //------- Integer ALU operations --------------------------
6052 
6053 // Integer ALU reg-reg operation
6054 // Operands needed in EX1, result generated in EX2
6055 // Eg.  ADD     x0, x1, x2
6056 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6057 %{
6058   single_instruction;
6059   dst    : EX2(write);
6060   src1   : EX1(read);
6061   src2   : EX1(read);
6062   INS01  : ISS; // Dual issue as instruction 0 or 1
6063   ALU    : EX2;
6064 %}
6065 
6066 // Integer ALU reg-reg operation with constant shift
6067 // Shifted register must be available in LATE_ISS instead of EX1
6068 // Eg.  ADD     x0, x1, x2, LSL #2
6069 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6070 %{
6071   single_instruction;
6072   dst    : EX2(write);
6073   src1   : EX1(read);
6074   src2   : ISS(read);
6075   INS01  : ISS;
6076   ALU    : EX2;
6077 %}
6078 
6079 // Integer ALU reg operation with constant shift
6080 // Eg.  LSL     x0, x1, #shift
6081 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6082 %{
6083   single_instruction;
6084   dst    : EX2(write);
6085   src1   : ISS(read);
6086   INS01  : ISS;
6087   ALU    : EX2;
6088 %}
6089 
6090 // Integer ALU reg-reg operation with variable shift
6091 // Both operands must be available in LATE_ISS instead of EX1
6092 // Result is available in EX1 instead of EX2
6093 // Eg.  LSLV    x0, x1, x2
6094 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6095 %{
6096   single_instruction;
6097   dst    : EX1(write);
6098   src1   : ISS(read);
6099   src2   : ISS(read);
6100   INS01  : ISS;
6101   ALU    : EX1;
6102 %}
6103 
6104 // Integer ALU reg-reg operation with extract
6105 // As for _vshift above, but result generated in EX2
6106 // Eg.  EXTR    x0, x1, x2, #N
6107 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
6108 %{
6109   single_instruction;
6110   dst    : EX2(write);
6111   src1   : ISS(read);
6112   src2   : ISS(read);
6113   INS1   : ISS; // Can only dual issue as Instruction 1
6114   ALU    : EX1;
6115 %}
6116 
6117 // Integer ALU reg operation
6118 // Eg.  NEG     x0, x1
6119 pipe_class ialu_reg(iRegI dst, iRegI src)
6120 %{
6121   single_instruction;
6122   dst    : EX2(write);
6123   src    : EX1(read);
6124   INS01  : ISS;
6125   ALU    : EX2;
6126 %}
6127 
6128 // Integer ALU reg immediate operation
6129 // Eg.  ADD     x0, x1, #N
6130 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6131 %{
6132   single_instruction;
6133   dst    : EX2(write);
6134   src1   : EX1(read);
6135   INS01  : ISS;
6136   ALU    : EX2;
6137 %}
6138 
6139 // Integer ALU immediate operation (no source operands)
6140 // Eg.  MOV     x0, #N
6141 pipe_class ialu_imm(iRegI dst)
6142 %{
6143   single_instruction;
6144   dst    : EX1(write);
6145   INS01  : ISS;
6146   ALU    : EX1;
6147 %}
6148 
6149 //------- Compare operation -------------------------------
6150 
6151 // Compare reg-reg
6152 // Eg.  CMP     x0, x1
6153 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6154 %{
6155   single_instruction;
6156 //  fixed_latency(16);
6157   cr     : EX2(write);
6158   op1    : EX1(read);
6159   op2    : EX1(read);
6160   INS01  : ISS;
6161   ALU    : EX2;
6162 %}
6163 
6164 // Compare reg-reg
6165 // Eg.  CMP     x0, #N
6166 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6167 %{
6168   single_instruction;
6169 //  fixed_latency(16);
6170   cr     : EX2(write);
6171   op1    : EX1(read);
6172   INS01  : ISS;
6173   ALU    : EX2;
6174 %}
6175 
6176 //------- Conditional instructions ------------------------
6177 
6178 // Conditional no operands
6179 // Eg.  CSINC   x0, zr, zr, <cond>
6180 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6181 %{
6182   single_instruction;
6183   cr     : EX1(read);
6184   dst    : EX2(write);
6185   INS01  : ISS;
6186   ALU    : EX2;
6187 %}
6188 
6189 // Conditional 2 operand
6190 // EG.  CSEL    X0, X1, X2, <cond>
6191 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6192 %{
6193   single_instruction;
6194   cr     : EX1(read);
6195   src1   : EX1(read);
6196   src2   : EX1(read);
6197   dst    : EX2(write);
6198   INS01  : ISS;
6199   ALU    : EX2;
6200 %}
6201 
6202 // Conditional 2 operand
6203 // EG.  CSEL    X0, X1, X2, <cond>
6204 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6205 %{
6206   single_instruction;
6207   cr     : EX1(read);
6208   src    : EX1(read);
6209   dst    : EX2(write);
6210   INS01  : ISS;
6211   ALU    : EX2;
6212 %}
6213 
6214 //------- Multiply pipeline operations --------------------
6215 
6216 // Multiply reg-reg
6217 // Eg.  MUL     w0, w1, w2
6218 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6219 %{
6220   single_instruction;
6221   dst    : WR(write);
6222   src1   : ISS(read);
6223   src2   : ISS(read);
6224   INS01  : ISS;
6225   MAC    : WR;
6226 %}
6227 
6228 // Multiply accumulate
6229 // Eg.  MADD    w0, w1, w2, w3
6230 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6231 %{
6232   single_instruction;
6233   dst    : WR(write);
6234   src1   : ISS(read);
6235   src2   : ISS(read);
6236   src3   : ISS(read);
6237   INS01  : ISS;
6238   MAC    : WR;
6239 %}
6240 
6241 // Eg.  MUL     x0, x1, x2
6242 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6243 %{
6244   single_instruction;
6245   fixed_latency(3); // Maximum latency for 64 bit mul
6246   dst    : WR(write);
6247   src1   : ISS(read);
6248   src2   : ISS(read);
6249   INS01  : ISS;
6250   MAC    : WR;
6251 %}
6252 
6253 // Multiply accumulate
6254 // Eg.  MADD    w0, w1, w2, w3
6255 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6256 %{
6257   single_instruction;
6258   fixed_latency(3); // Maximum latency for 64 bit mul
6259   dst    : WR(write);
6260   src1   : ISS(read);
6261   src2   : ISS(read);
6262   src3   : ISS(read);
6263   INS01  : ISS;
6264   MAC    : WR;
6265 %}
6266 
6267 //------- Divide pipeline operations --------------------
6268 
6269 // Eg.  SDIV    w0, w1, w2
6270 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6271 %{
6272   single_instruction;
6273   fixed_latency(8); // Maximum latency for 32 bit divide
6274   dst    : WR(write);
6275   src1   : ISS(read);
6276   src2   : ISS(read);
6277   INS0   : ISS; // Can only dual issue as instruction 0
6278   DIV    : WR;
6279 %}
6280 
6281 // Eg.  SDIV    x0, x1, x2
6282 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6283 %{
6284   single_instruction;
6285   fixed_latency(16); // Maximum latency for 64 bit divide
6286   dst    : WR(write);
6287   src1   : ISS(read);
6288   src2   : ISS(read);
6289   INS0   : ISS; // Can only dual issue as instruction 0
6290   DIV    : WR;
6291 %}
6292 
6293 //------- Load pipeline operations ------------------------
6294 
6295 // Load - prefetch
6296 // Eg.  PFRM    <mem>
6297 pipe_class iload_prefetch(memory mem)
6298 %{
6299   single_instruction;
6300   mem    : ISS(read);
6301   INS01  : ISS;
6302   LDST   : WR;
6303 %}
6304 
6305 // Load - reg, mem
6306 // Eg.  LDR     x0, <mem>
6307 pipe_class iload_reg_mem(iRegI dst, memory mem)
6308 %{
6309   single_instruction;
6310   dst    : WR(write);
6311   mem    : ISS(read);
6312   INS01  : ISS;
6313   LDST   : WR;
6314 %}
6315 
6316 // Load - reg, reg
6317 // Eg.  LDR     x0, [sp, x1]
6318 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6319 %{
6320   single_instruction;
6321   dst    : WR(write);
6322   src    : ISS(read);
6323   INS01  : ISS;
6324   LDST   : WR;
6325 %}
6326 
6327 //------- Store pipeline operations -----------------------
6328 
6329 // Store - zr, mem
6330 // Eg.  STR     zr, <mem>
6331 pipe_class istore_mem(memory mem)
6332 %{
6333   single_instruction;
6334   mem    : ISS(read);
6335   INS01  : ISS;
6336   LDST   : WR;
6337 %}
6338 
6339 // Store - reg, mem
6340 // Eg.  STR     x0, <mem>
6341 pipe_class istore_reg_mem(iRegI src, memory mem)
6342 %{
6343   single_instruction;
6344   mem    : ISS(read);
6345   src    : EX2(read);
6346   INS01  : ISS;
6347   LDST   : WR;
6348 %}
6349 
6350 // Store - reg, reg
6351 // Eg. STR      x0, [sp, x1]
6352 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6353 %{
6354   single_instruction;
6355   dst    : ISS(read);
6356   src    : EX2(read);
6357   INS01  : ISS;
6358   LDST   : WR;
6359 %}
6360 
6361 //------- Branch pipeline operations ----------------------
6362 
6363 // Branch
6364 pipe_class pipe_branch()
6365 %{
6366   single_instruction;
6367   INS01  : ISS;
6368   BRANCH : EX1;
6369 %}
6370 
6371 // Conditional branch
6372 pipe_class pipe_branch_cond(rFlagsReg cr)
6373 %{
6374   single_instruction;
6375   cr     : EX1(read);
6376   INS01  : ISS;
6377   BRANCH : EX1;
6378 %}
6379 
6380 // Compare & Branch
6381 // EG.  CBZ/CBNZ
6382 pipe_class pipe_cmp_branch(iRegI op1)
6383 %{
6384   single_instruction;
6385   op1    : EX1(read);
6386   INS01  : ISS;
6387   BRANCH : EX1;
6388 %}
6389 
6390 //------- Synchronisation operations ----------------------
6391 
6392 // Any operation requiring serialization.
6393 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
6394 pipe_class pipe_serial()
6395 %{
6396   single_instruction;
6397   force_serialization;
6398   fixed_latency(16);
6399   INS01  : ISS(2); // Cannot dual issue with any other instruction
6400   LDST   : WR;
6401 %}
6402 
6403 // Generic big/slow expanded idiom - also serialized
6404 pipe_class pipe_slow()
6405 %{
6406   instruction_count(10);
6407   multiple_bundles;
6408   force_serialization;
6409   fixed_latency(16);
6410   INS01  : ISS(2); // Cannot dual issue with any other instruction
6411   LDST   : WR;
6412 %}
6413 
6414 // Empty pipeline class
6415 pipe_class pipe_class_empty()
6416 %{
6417   single_instruction;
6418   fixed_latency(0);
6419 %}
6420 
6421 // Default pipeline class.
6422 pipe_class pipe_class_default()
6423 %{
6424   single_instruction;
6425   fixed_latency(2);
6426 %}
6427 
6428 // Pipeline class for compares.
6429 pipe_class pipe_class_compare()
6430 %{
6431   single_instruction;
6432   fixed_latency(16);
6433 %}
6434 
6435 // Pipeline class for memory operations.
6436 pipe_class pipe_class_memory()
6437 %{
6438   single_instruction;
6439   fixed_latency(16);
6440 %}
6441 
6442 // Pipeline class for call.
6443 pipe_class pipe_class_call()
6444 %{
6445   single_instruction;
6446   fixed_latency(100);
6447 %}
6448 
6449 // Define the class for the Nop node.
6450 define %{
6451    MachNop = pipe_class_empty;
6452 %}
6453 
6454 %}
6455 //----------INSTRUCTIONS-------------------------------------------------------
6456 //
6457 // match      -- States which machine-independent subtree may be replaced
6458 //               by this instruction.
6459 // ins_cost   -- The estimated cost of this instruction is used by instruction
6460 //               selection to identify a minimum cost tree of machine
6461 //               instructions that matches a tree of machine-independent
6462 //               instructions.
6463 // format     -- A string providing the disassembly for this instruction.
6464 //               The value of an instruction's operand may be inserted
6465 //               by referring to it with a '$' prefix.
6466 // opcode     -- Three instruction opcodes may be provided.  These are referred
6467 //               to within an encode class as $primary, $secondary, and $tertiary
6468 //               respectively.  The primary opcode is commonly used to
6469 //               indicate the type of machine instruction, while secondary
6470 //               and tertiary are often used for prefix options or addressing
6471 //               modes.
6472 // ins_encode -- A list of encode classes with parameters. The encode class
6473 //               name must have been defined in an 'enc_class' specification
6474 //               in the encode section of the architecture description.
6475 
6476 // ============================================================================
6477 // Memory (Load/Store) Instructions
6478 
6479 // Load Instructions
6480 
6481 // Load Byte (8 bit signed)
6482 instruct loadB(iRegINoSp dst, memory mem)
6483 %{
6484   match(Set dst (LoadB mem));
6485   predicate(!needs_acquiring_load(n));
6486 
6487   ins_cost(4 * INSN_COST);
6488   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6489 
6490   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6491 
6492   ins_pipe(iload_reg_mem);
6493 %}
6494 
6495 // Load Byte (8 bit signed) into long
6496 instruct loadB2L(iRegLNoSp dst, memory mem)
6497 %{
6498   match(Set dst (ConvI2L (LoadB mem)));
6499   predicate(!needs_acquiring_load(n->in(1)));
6500 
6501   ins_cost(4 * INSN_COST);
6502   format %{ "ldrsb  $dst, $mem\t# byte" %}
6503 
6504   ins_encode(aarch64_enc_ldrsb(dst, mem));
6505 
6506   ins_pipe(iload_reg_mem);
6507 %}
6508 
6509 // Load Byte (8 bit unsigned)
6510 instruct loadUB(iRegINoSp dst, memory mem)
6511 %{
6512   match(Set dst (LoadUB mem));
6513   predicate(!needs_acquiring_load(n));
6514 
6515   ins_cost(4 * INSN_COST);
6516   format %{ "ldrbw  $dst, $mem\t# byte" %}
6517 
6518   ins_encode(aarch64_enc_ldrb(dst, mem));
6519 
6520   ins_pipe(iload_reg_mem);
6521 %}
6522 
6523 // Load Byte (8 bit unsigned) into long
6524 instruct loadUB2L(iRegLNoSp dst, memory mem)
6525 %{
6526   match(Set dst (ConvI2L (LoadUB mem)));
6527   predicate(!needs_acquiring_load(n->in(1)));
6528 
6529   ins_cost(4 * INSN_COST);
6530   format %{ "ldrb  $dst, $mem\t# byte" %}
6531 
6532   ins_encode(aarch64_enc_ldrb(dst, mem));
6533 
6534   ins_pipe(iload_reg_mem);
6535 %}
6536 
6537 // Load Short (16 bit signed)
6538 instruct loadS(iRegINoSp dst, memory mem)
6539 %{
6540   match(Set dst (LoadS mem));
6541   predicate(!needs_acquiring_load(n));
6542 
6543   ins_cost(4 * INSN_COST);
6544   format %{ "ldrshw  $dst, $mem\t# short" %}
6545 
6546   ins_encode(aarch64_enc_ldrshw(dst, mem));
6547 
6548   ins_pipe(iload_reg_mem);
6549 %}
6550 
6551 // Load Short (16 bit signed) into long
6552 instruct loadS2L(iRegLNoSp dst, memory mem)
6553 %{
6554   match(Set dst (ConvI2L (LoadS mem)));
6555   predicate(!needs_acquiring_load(n->in(1)));
6556 
6557   ins_cost(4 * INSN_COST);
6558   format %{ "ldrsh  $dst, $mem\t# short" %}
6559 
6560   ins_encode(aarch64_enc_ldrsh(dst, mem));
6561 
6562   ins_pipe(iload_reg_mem);
6563 %}
6564 
6565 // Load Char (16 bit unsigned)
6566 instruct loadUS(iRegINoSp dst, memory mem)
6567 %{
6568   match(Set dst (LoadUS mem));
6569   predicate(!needs_acquiring_load(n));
6570 
6571   ins_cost(4 * INSN_COST);
6572   format %{ "ldrh  $dst, $mem\t# short" %}
6573 
6574   ins_encode(aarch64_enc_ldrh(dst, mem));
6575 
6576   ins_pipe(iload_reg_mem);
6577 %}
6578 
6579 // Load Short/Char (16 bit unsigned) into long
6580 instruct loadUS2L(iRegLNoSp dst, memory mem)
6581 %{
6582   match(Set dst (ConvI2L (LoadUS mem)));
6583   predicate(!needs_acquiring_load(n->in(1)));
6584 
6585   ins_cost(4 * INSN_COST);
6586   format %{ "ldrh  $dst, $mem\t# short" %}
6587 
6588   ins_encode(aarch64_enc_ldrh(dst, mem));
6589 
6590   ins_pipe(iload_reg_mem);
6591 %}
6592 
6593 // Load Integer (32 bit signed)
6594 instruct loadI(iRegINoSp dst, memory mem)
6595 %{
6596   match(Set dst (LoadI mem));
6597   predicate(!needs_acquiring_load(n));
6598 
6599   ins_cost(4 * INSN_COST);
6600   format %{ "ldrw  $dst, $mem\t# int" %}
6601 
6602   ins_encode(aarch64_enc_ldrw(dst, mem));
6603 
6604   ins_pipe(iload_reg_mem);
6605 %}
6606 
6607 // Load Integer (32 bit signed) into long
6608 instruct loadI2L(iRegLNoSp dst, memory mem)
6609 %{
6610   match(Set dst (ConvI2L (LoadI mem)));
6611   predicate(!needs_acquiring_load(n->in(1)));
6612 
6613   ins_cost(4 * INSN_COST);
6614   format %{ "ldrsw  $dst, $mem\t# int" %}
6615 
6616   ins_encode(aarch64_enc_ldrsw(dst, mem));
6617 
6618   ins_pipe(iload_reg_mem);
6619 %}
6620 
6621 // Load Integer (32 bit unsigned) into long
6622 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6623 %{
6624   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6625   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6626 
6627   ins_cost(4 * INSN_COST);
6628   format %{ "ldrw  $dst, $mem\t# int" %}
6629 
6630   ins_encode(aarch64_enc_ldrw(dst, mem));
6631 
6632   ins_pipe(iload_reg_mem);
6633 %}
6634 
6635 // Load Long (64 bit signed)
6636 instruct loadL(iRegLNoSp dst, memory mem)
6637 %{
6638   match(Set dst (LoadL mem));
6639   predicate(!needs_acquiring_load(n));
6640 
6641   ins_cost(4 * INSN_COST);
6642   // Fix: disassembly annotation previously said "# int" for a 64-bit load.
6643   format %{ "ldr  $dst, $mem\t# long" %}
6644 
6645   ins_encode(aarch64_enc_ldr(dst, mem));
6646 
6647   ins_pipe(iload_reg_mem);
6648 %}
6648 
6649 // Load Range
6650 instruct loadRange(iRegINoSp dst, memory mem)
6651 %{
6652   match(Set dst (LoadRange mem));
6653 
6654   ins_cost(4 * INSN_COST);
6655   format %{ "ldrw  $dst, $mem\t# range" %}
6656 
6657   ins_encode(aarch64_enc_ldrw(dst, mem));
6658 
6659   ins_pipe(iload_reg_mem);
6660 %}
6661 
6662 // Load Pointer
6663 instruct loadP(iRegPNoSp dst, memory mem)
6664 %{
6665   match(Set dst (LoadP mem));
6666   predicate(!needs_acquiring_load(n));
6667 
6668   ins_cost(4 * INSN_COST);
6669   format %{ "ldr  $dst, $mem\t# ptr" %}
6670 
6671   ins_encode(aarch64_enc_ldr(dst, mem));
6672 
6673   ins_pipe(iload_reg_mem);
6674 %}
6675 
6676 // Load Compressed Pointer
6677 instruct loadN(iRegNNoSp dst, memory mem)
6678 %{
6679   match(Set dst (LoadN mem));
6680   predicate(!needs_acquiring_load(n));
6681 
6682   ins_cost(4 * INSN_COST);
6683   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6684 
6685   ins_encode(aarch64_enc_ldrw(dst, mem));
6686 
6687   ins_pipe(iload_reg_mem);
6688 %}
6689 
6690 // Load Klass Pointer
6691 instruct loadKlass(iRegPNoSp dst, memory mem)
6692 %{
6693   match(Set dst (LoadKlass mem));
6694   predicate(!needs_acquiring_load(n));
6695 
6696   ins_cost(4 * INSN_COST);
6697   format %{ "ldr  $dst, $mem\t# class" %}
6698 
6699   ins_encode(aarch64_enc_ldr(dst, mem));
6700 
6701   ins_pipe(iload_reg_mem);
6702 %}
6703 
6704 // Load Narrow Klass Pointer
6705 instruct loadNKlass(iRegNNoSp dst, memory mem)
6706 %{
6707   match(Set dst (LoadNKlass mem));
6708   predicate(!needs_acquiring_load(n));
6709 
6710   ins_cost(4 * INSN_COST);
6711   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6712 
6713   ins_encode(aarch64_enc_ldrw(dst, mem));
6714 
6715   ins_pipe(iload_reg_mem);
6716 %}
6717 
6718 // Load Float
6719 instruct loadF(vRegF dst, memory mem)
6720 %{
6721   match(Set dst (LoadF mem));
6722   predicate(!needs_acquiring_load(n));
6723 
6724   ins_cost(4 * INSN_COST);
6725   format %{ "ldrs  $dst, $mem\t# float" %}
6726 
6727   ins_encode( aarch64_enc_ldrs(dst, mem) );
6728 
6729   ins_pipe(pipe_class_memory);
6730 %}
6731 
6732 // Load Double
6733 instruct loadD(vRegD dst, memory mem)
6734 %{
6735   match(Set dst (LoadD mem));
6736   predicate(!needs_acquiring_load(n));
6737 
6738   ins_cost(4 * INSN_COST);
6739   format %{ "ldrd  $dst, $mem\t# double" %}
6740 
6741   ins_encode( aarch64_enc_ldrd(dst, mem) );
6742 
6743   ins_pipe(pipe_class_memory);
6744 %}
6745 
6746 
6747 // Load Int Constant
6748 instruct loadConI(iRegINoSp dst, immI src)
6749 %{
6750   match(Set dst src);
6751 
6752   ins_cost(INSN_COST);
6753   format %{ "mov $dst, $src\t# int" %}
6754 
6755   ins_encode( aarch64_enc_movw_imm(dst, src) );
6756 
6757   ins_pipe(ialu_imm);
6758 %}
6759 
6760 // Load Long Constant
6761 instruct loadConL(iRegLNoSp dst, immL src)
6762 %{
6763   match(Set dst src);
6764 
6765   ins_cost(INSN_COST);
6766   format %{ "mov $dst, $src\t# long" %}
6767 
6768   ins_encode( aarch64_enc_mov_imm(dst, src) );
6769 
6770   ins_pipe(ialu_imm);
6771 %}
6772 
6773 // Load Pointer Constant
6774 
6775 instruct loadConP(iRegPNoSp dst, immP con)
6776 %{
6777   match(Set dst con);
6778 
6779   ins_cost(INSN_COST * 4);
6780   format %{
6781     "mov  $dst, $con\t# ptr\n\t"
6782   %}
6783 
6784   ins_encode(aarch64_enc_mov_p(dst, con));
6785 
6786   ins_pipe(ialu_imm);
6787 %}
6788 
6789 // Load Null Pointer Constant
6790 
6791 instruct loadConP0(iRegPNoSp dst, immP0 con)
6792 %{
6793   match(Set dst con);
6794 
6795   ins_cost(INSN_COST);
6796   format %{ "mov  $dst, $con\t# NULL ptr" %}
6797 
6798   ins_encode(aarch64_enc_mov_p0(dst, con));
6799 
6800   ins_pipe(ialu_imm);
6801 %}
6802 
6803 // Load Pointer Constant One
6804 
6805 instruct loadConP1(iRegPNoSp dst, immP_1 con)
6806 %{
6807   match(Set dst con);
6808 
6809   ins_cost(INSN_COST);
6810   // Fix: format previously said "# NULL ptr", copy-pasted from loadConP0;
6811   // this rule materializes the pointer constant one.
6812   format %{ "mov  $dst, $con\t# ptr 1" %}
6813 
6814   ins_encode(aarch64_enc_mov_p1(dst, con));
6815 
6816   ins_pipe(ialu_imm);
6817 %}
6816 
// Load Poll Page Constant
// The polling-page address is formed PC-relative with adr, per the format.

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
// Costed like loadConP (INSN_COST * 4); see aarch64_enc_mov_n.

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
6886 
// Load Packed Float Constant
// immFPacked presumably restricts matching to float constants encodable as an
// fmov immediate (operand defined earlier in this file) — confirm there.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // The cast is deliberate: the fmovs overload used here takes its
    // immediate as a double.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: fetch the float from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
6930 
// Load Double Constant
// General case: fetch the double from the constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed disassembly annotation: this is a double constant, not a float
  // (the "float=$con" text was copy-pasted from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6947 
// Store Instructions

// Store CMS card-mark Immediate
// Matches only when unnecessary_storestore(n) proves the StoreStore barrier
// redundant; the _ordered variant below handles the general case.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// Plain (non-volatile) store; releasing stores are matched by the stlr
// rules in the volatile section below (see needs_releasing_store).
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
6994 
6995 
// Store Zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format text: the old string said the misspelled "rscractch2", but
  // the strb0 encoding stores zr (matching storeimmCM0's "strb zr" text).
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7008 
// Store Char/Short
// Plain (non-volatile) stores only; cf. the stlr rules in the volatile
// section below.
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Char/Short
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Integer
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7063 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: this is a long store ("# int" was a
  // copy-paste leftover from storeI).
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7077 
// Store Zero Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: this is a long store, not an int store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7091 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Zero Compressed Pointer
// When both the narrow-oop and narrow-klass bases are NULL, rheapbase holds
// zero (per the format's "rheapbase==0"), so storing it writes a compressed
// NULL without materializing a zero constant.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7162 
// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7210 
//  ---------------- volatile loads and stores ----------------
//
// These rules emit acquiring loads (ldar*).  The address operand is a bare
// `indirect` register rather than the general `memory` operand — presumably
// because the ldar forms take no offset addressing.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7302 
// Load Short (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format text: the encoding emits the sign-extending ldarsh, not the
  // zero-extending ldarh shown previously.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7315 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// A 32-bit ldarw zeroes the upper half of the 64-bit register, so the AndL
// with the 32-bit mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7341 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly annotation: this is a long load, not an int load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7354 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Store Byte
// Releasing store: stlr* orders against earlier memory accesses.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7446 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly annotation: this is a long store, not an int store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7459 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7514 
//  ---------------- end of volatile loads and stores ----------------

// ============================================================================
// BSWAP Instructions

instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Signed variant: after the 16-bit byte swap, sbfmw sign-extends the low
// 16 bits of $dst into the full 32-bit register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7573 
7574 // ============================================================================
7575 // Zero Count Instructions
7576 
7577 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7578   match(Set dst (CountLeadingZerosI src));
7579 
7580   ins_cost(INSN_COST);
7581   format %{ "clzw  $dst, $src" %}
7582   ins_encode %{
7583     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7584   %}
7585 
7586   ins_pipe(ialu_reg);
7587 %}
7588 
7589 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7590   match(Set dst (CountLeadingZerosL src));
7591 
7592   ins_cost(INSN_COST);
7593   format %{ "clz   $dst, $src" %}
7594   ins_encode %{
7595     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7596   %}
7597 
7598   ins_pipe(ialu_reg);
7599 %}
7600 
7601 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7602   match(Set dst (CountTrailingZerosI src));
7603 
7604   ins_cost(INSN_COST * 2);
7605   format %{ "rbitw  $dst, $src\n\t"
7606             "clzw   $dst, $dst" %}
7607   ins_encode %{
7608     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7609     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7610   %}
7611 
7612   ins_pipe(ialu_reg);
7613 %}
7614 
7615 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7616   match(Set dst (CountTrailingZerosL src));
7617 
7618   ins_cost(INSN_COST * 2);
7619   format %{ "rbit   $dst, $src\n\t"
7620             "clz    $dst, $dst" %}
7621   ins_encode %{
7622     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7623     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7624   %}
7625 
7626   ins_pipe(ialu_reg);
7627 %}
7628 
//---------- Population Count Instructions -------------------------------------
//
// Strategy: move the value into a vector register, count bits per byte with
// CNT (8B), sum the eight byte counts with ADDV, then move the result back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory form: load the int straight into the vector register with ldrs,
// skipping the GP-register round trip.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7718 
7719 // ============================================================================
7720 // MemBar Instruction
7721 
7722 instruct load_fence() %{
7723   match(LoadFence);
7724   ins_cost(VOLATILE_REF_COST);
7725 
7726   format %{ "load_fence" %}
7727 
7728   ins_encode %{
7729     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7730   %}
7731   ins_pipe(pipe_serial);
7732 %}
7733 
7734 instruct unnecessary_membar_acquire() %{
7735   predicate(unnecessary_acquire(n));
7736   match(MemBarAcquire);
7737   ins_cost(0);
7738 
7739   format %{ "membar_acquire (elided)" %}
7740 
7741   ins_encode %{
7742     __ block_comment("membar_acquire (elided)");
7743   %}
7744 
7745   ins_pipe(pipe_class_empty);
7746 %}
7747 
7748 instruct membar_acquire() %{
7749   match(MemBarAcquire);
7750   ins_cost(VOLATILE_REF_COST);
7751 
7752   format %{ "membar_acquire\n\t"
7753             "dmb ish" %}
7754 
7755   ins_encode %{
7756     __ block_comment("membar_acquire");
7757     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7758   %}
7759 
7760   ins_pipe(pipe_serial);
7761 %}
7762 
7763 
7764 instruct membar_acquire_lock() %{
7765   match(MemBarAcquireLock);
7766   ins_cost(VOLATILE_REF_COST);
7767 
7768   format %{ "membar_acquire_lock (elided)" %}
7769 
7770   ins_encode %{
7771     __ block_comment("membar_acquire_lock (elided)");
7772   %}
7773 
7774   ins_pipe(pipe_serial);
7775 %}
7776 
7777 instruct store_fence() %{
7778   match(StoreFence);
7779   ins_cost(VOLATILE_REF_COST);
7780 
7781   format %{ "store_fence" %}
7782 
7783   ins_encode %{
7784     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7785   %}
7786   ins_pipe(pipe_serial);
7787 %}
7788 
7789 instruct unnecessary_membar_release() %{
7790   predicate(unnecessary_release(n));
7791   match(MemBarRelease);
7792   ins_cost(0);
7793 
7794   format %{ "membar_release (elided)" %}
7795 
7796   ins_encode %{
7797     __ block_comment("membar_release (elided)");
7798   %}
7799   ins_pipe(pipe_serial);
7800 %}
7801 
7802 instruct membar_release() %{
7803   match(MemBarRelease);
7804   ins_cost(VOLATILE_REF_COST);
7805 
7806   format %{ "membar_release\n\t"
7807             "dmb ish" %}
7808 
7809   ins_encode %{
7810     __ block_comment("membar_release");
7811     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7812   %}
7813   ins_pipe(pipe_serial);
7814 %}
7815 
7816 instruct membar_storestore() %{
7817   match(MemBarStoreStore);
7818   ins_cost(VOLATILE_REF_COST);
7819 
7820   format %{ "MEMBAR-store-store" %}
7821 
7822   ins_encode %{
7823     __ membar(Assembler::StoreStore);
7824   %}
7825   ins_pipe(pipe_serial);
7826 %}
7827 
7828 instruct membar_release_lock() %{
7829   match(MemBarReleaseLock);
7830   ins_cost(VOLATILE_REF_COST);
7831 
7832   format %{ "membar_release_lock (elided)" %}
7833 
7834   ins_encode %{
7835     __ block_comment("membar_release_lock (elided)");
7836   %}
7837 
7838   ins_pipe(pipe_serial);
7839 %}
7840 
7841 instruct unnecessary_membar_volatile() %{
7842   predicate(unnecessary_volatile(n));
7843   match(MemBarVolatile);
7844   ins_cost(0);
7845 
7846   format %{ "membar_volatile (elided)" %}
7847 
7848   ins_encode %{
7849     __ block_comment("membar_volatile (elided)");
7850   %}
7851 
7852   ins_pipe(pipe_serial);
7853 %}
7854 
7855 instruct membar_volatile() %{
7856   match(MemBarVolatile);
7857   ins_cost(VOLATILE_REF_COST*100);
7858 
7859   format %{ "membar_volatile\n\t"
7860              "dmb ish"%}
7861 
7862   ins_encode %{
7863     __ block_comment("membar_volatile");
7864     __ membar(Assembler::StoreLoad);
7865   %}
7866 
7867   ins_pipe(pipe_serial);
7868 %}
7869 
7870 // ============================================================================
7871 // Cast/Convert Instructions
7872 
7873 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7874   match(Set dst (CastX2P src));
7875 
7876   ins_cost(INSN_COST);
7877   format %{ "mov $dst, $src\t# long -> ptr" %}
7878 
7879   ins_encode %{
7880     if ($dst$$reg != $src$$reg) {
7881       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7882     }
7883   %}
7884 
7885   ins_pipe(ialu_reg);
7886 %}
7887 
7888 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7889   match(Set dst (CastP2X src));
7890 
7891   ins_cost(INSN_COST);
7892   format %{ "mov $dst, $src\t# ptr -> long" %}
7893 
7894   ins_encode %{
7895     if ($dst$$reg != $src$$reg) {
7896       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7897     }
7898   %}
7899 
7900   ins_pipe(ialu_reg);
7901 %}
7902 
7903 // Convert oop into int for vectors alignment masking
7904 instruct convP2I(iRegINoSp dst, iRegP src) %{
7905   match(Set dst (ConvL2I (CastP2X src)));
7906 
7907   ins_cost(INSN_COST);
7908   format %{ "movw $dst, $src\t# ptr -> int" %}
7909   ins_encode %{
7910     __ movw($dst$$Register, $src$$Register);
7911   %}
7912 
7913   ins_pipe(ialu_reg);
7914 %}
7915 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format text: use the operand sigil "$dst" (was the literal "dst")
  // and the movw mnemonic actually emitted by the encoding.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7931 
7932 
// Convert oop pointer into compressed form
//
// This variant matches EncodeP nodes whose type does NOT exclude null, so
// the emitted MacroAssembler::encode_heap_oop sequence presumably has to
// cope with a null src (TODO confirm against macroAssembler_aarch64).
// Note that, unlike the _not_null variants below, this one clobbers the
// flags register (effect KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// As above, but the oop is statically known non-null, enabling the cheaper
// encode_heap_oop_not_null sequence.
// NOTE(review): cr is declared as an operand but there is no effect(KILL cr)
// clause -- presumably encode_heap_oop_not_null leaves the flags untouched
// and the operand is vestigial; confirm before relying on flags across this
// node.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Convert a compressed oop back into a full pointer. The predicate selects
// DecodeN nodes whose type is neither provably non-null nor a constant;
// those cases are handled by decodeHeapOop_not_null below.
// NOTE(review): cr is declared but, as above, carries no effect clause.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode of a compressed oop known to be non-null (or a constant), allowing
// the null check inside the decode sequence to be skipped.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7986 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null by construction).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Expand a compressed klass pointer back to a full pointer.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // dst aliases src: use the in-place single-register overload --
      // presumably the two-register form requires distinct registers
      // (TODO confirm against macroAssembler_aarch64).
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8024 
// CheckCastPP is a compiler type assertion on a pointer already in $dst;
// it exists only for the optimizer's type system and emits no machine code
// (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP: same zero-size, no-op treatment as CheckCastPP above.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: integer-type narrowing assertion; likewise emits nothing.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8055 
8056 // ============================================================================
8057 // Atomic operation instructions
8058 //
8059 // Intel and SPARC both implement Ideal Node LoadPLocked and
8060 // Store{PIL}Conditional instructions using a normal load for the
8061 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8062 //
8063 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8064 // pair to lock object allocations from Eden space when not using
8065 // TLABs.
8066 //
8067 // There does not appear to be a Load{IL}Locked Ideal Node and the
8068 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8069 // and to use StoreIConditional only for 32-bit and StoreLConditional
8070 // only for 64-bit.
8071 //
8072 // We implement LoadPLocked and StorePLocked instructions using,
8073 // respectively the AArch64 hw load-exclusive and store-conditional
8074 // instructions. Whereas we must implement each of
8075 // Store{IL}Conditional using a CAS which employs a pair of
8076 // instructions comprising a load-exclusive followed by a
8077 // store-conditional.
8078 
8079 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64

// Load-exclusive with acquire semantics; pairs with storePConditional
// below to form an LL/SC update of the heap top.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}

// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// Note that $oldval is not passed to the encoding: the "compare" half of
// the update is implicit in the exclusive monitor armed by the preceding
// loadPLocked, so only $newval and the address are needed here.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8121 
8122 
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.

// Unlike storePConditional, this is a full CAS (load-exclusive +
// store-conditional pair); success is reported in the flags (EQ).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),

  ins_pipe(pipe_slow);
%}
8161 
// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// All of these produce a boolean result in $res via cset on the EQ flag
// left by the cmpxchg encoding: 1 on successful exchange, 0 otherwise.

// CAS of a byte; byte-width exclusive load/store pair.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a short (halfword).
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a 32-bit int (word).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a 64-bit long (xword).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a full-width oop/pointer.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS of a narrow (compressed) oop; word-width exchange.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8275 
// alternative CompareAndSwapX when we are eliding barriers

// Same shapes as above, but the needs_acquiring_load_exclusive(n)
// predicate selects these when the surrounding code has elided explicit
// barriers; the _acq cmpxchg encodings supply acquire semantics instead,
// and ins_cost is halved so they win over the barrier forms.

instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8391 
8392 
8393 // ---------------------------------------------------------------------
8394 
8395 
// BEGIN This section of the file is automatically generated. Do not edit --------------
// NOTE(review): the comments added in this section are review annotations
// only; any substantive change must be made in aarch64_ad_cas.m4 and the
// section regenerated.

// Sundry CAS operations.  Note that release is always true,
// regardless of the memory ordering of the CAS.  This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2.  Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.

// This section is generated from aarch64_ad_cas.m4

// CompareAndExchange returns the value previously in memory in $res
// (TEMP_DEF res: res must not overlap the inputs), unlike
// CompareAndSwap which returns a success flag.
// NOTE(review): the "(byte, weak)" / "(short, weak)" / ... text in the
// format strings below looks copy-pasted from the weak forms -- every
// cmpxchg in this group passes /*weak*/ false; fix in the m4 source.

// Strong CAE of a byte; result sign-extended to int via sxtbw.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE of a short; result sign-extended to int via sxthw.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE of an int.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE of a long.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE of a narrow (compressed) oop; word-width exchange.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Strong CAE of a full-width pointer.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8500 
// Acquire variants of the strong CAE forms above: selected by
// needs_acquiring_load_exclusive(n), they pass /*acquire*/ true to
// cmpxchg and carry half the ins_cost so they outrank the plain forms.
// NOTE(review): generated section -- the "(..., weak)" format-string
// text is inherited from the m4 source; these are strong CAS ops
// (/*weak*/ false).
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}


instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8600 
// Weak CAS forms (generated section): these pass /*weak*/ true and noreg
// as the cmpxchg result register -- $res holds only the success flag
// (csetw of EQ), not the exchanged value, so no TEMP_DEF is needed.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8702 
// Acquire variants of the weak CAS forms above (generated section):
// selected by needs_acquiring_load_exclusive(n), passing
// /*acquire*/ true; ins_cost halved so they outrank the plain forms.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8810 
8811 // END This section of the file is automatically generated. Do not edit --------------
8812 // ---------------------------------------------------------------------
8813 
// Atomic exchange rules (GetAndSet*): store $newv to [$mem] and return the
// previous memory value in $prev.  The "w" macro-assembler forms operate on
// 32-bit words (int and narrow oop); the plain forms on 64-bit values.

// int exchange
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long exchange
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow-oop exchange (32-bit)
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// pointer exchange (64-bit)
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8853 
// Acquiring atomic exchange rules.  Selected over the plain rules via the
// needs_acquiring_load_exclusive(n) predicate; they use the acquire/release
// ("al") macro-assembler forms and carry a lower cost (VOLATILE_REF_COST vs
// 2 * VOLATILE_REF_COST) so the matcher prefers them when the predicate holds.

// int exchange, acquiring
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long exchange, acquiring
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// narrow-oop exchange, acquiring
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// pointer exchange, acquiring
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8897 
8898 
// Atomic fetch-and-add rules (GetAndAdd{L,I}).  Four axes of variation:
//  - long (atomic_add) vs int (atomic_addw),
//  - increment in a register vs an add/sub-range immediate ($incr$$constant),
//  - result used ($newval receives the OLD memory value) vs result unused
//    (predicate result_not_used(); noreg is passed as destination and the
//    rule costs 1 less, so it wins when the value is dead).

// long += register, result used
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long += register, result discarded
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long += immediate, result used
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long += immediate, result discarded
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += register, result used
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += register, result discarded
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += immediate, result used
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += immediate, result discarded
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8982 
// Acquiring fetch-and-add rules: same 8-way matrix as above but predicated
// on needs_acquiring_load_exclusive(n) (ANDed with result_not_used() for
// the no_res forms) and emitted with the acquire/release "al" variants
// (atomic_addal / atomic_addalw).  Lower cost than the plain rules so the
// matcher prefers them when the predicate holds.

// long += register, acquiring, result used
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long += register, acquiring, result discarded
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long += immediate, acquiring, result used
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// long += immediate, acquiring, result discarded
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += register, acquiring, result used
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += register, acquiring, result discarded
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += immediate, acquiring, result used
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// int += immediate, acquiring, result discarded
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9070 
9071 // Manifest a CmpL result in an integer register.
9072 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);            // three-instruction sequence clobbers NZCV

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // cmp sets flags; csetw yields 0 (equal) or 1 (unequal); cnegw then
    // negates the 1 to -1 when src1 < src2, producing exactly -1/0/1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9093 
// Same -1/0/1 manifestation as above, but with an add/sub-range immediate
// second operand.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // AArch64 compare immediates are unsigned 12-bit (optionally shifted),
    // so a negative constant is handled by adding its negation instead of
    // subtracting it.  immLAddSub presumably restricts the range so that
    // -con cannot overflow — defined elsewhere in this file; verify there.
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9118 
9119 // ============================================================================
9120 // Conditional Move Instructions
9121 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9131 
// Conditional move, int, signed comparison.  Note the operand order:
// cselw picks $src2 when the condition holds, otherwise $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, int, unsigned comparison (cmpOpU / rFlagsRegU flavour —
// see the note above on why both flavours are needed).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9163 
9164 // special cases where one arg is zero
9165 
9166 // n.b. this is selected in preference to the rule above because it
9167 // avoids loading constant 0 into a source register
9168 
9169 // TODO
9170 // we ought only to be able to cull one of these variants as the ideal
9171 // transforms ought always to order the zero consistently (to left/right?)
9172 
// int cmove where one arm is the constant 0: use the zero register instead
// of materializing 0 in a source register.  zero-on-left and zero-on-right
// each get a signed and an unsigned rule.

instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9236 
9237 // special case for creating a boolean 0 or 1
9238 
9239 // n.b. this is selected in preference to the rule above because it
9240 // avoids loading constants 0 and 1 into a source register
9241 
// Boolean materialization: CMoveI selecting between constants 1 and 0
// collapses to a single csincw (zr, zr): 0 when the condition holds,
// zr + 1 = 1 otherwise — equivalent to cset with the negated condition.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned-comparison flavour of the rule above
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9279 
// Conditional move, long: 64-bit csel, same src2/src1 ordering as the int
// rules.  Signed and unsigned comparison flavours.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9311 
9312 // special cases where one arg is zero
9313 
// long cmove with one arm equal to constant 0: use zr directly.

instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9377 
// Conditional move, pointer: 64-bit csel, signed and unsigned flavours.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9409 
9410 // special cases where one arg is zero
9411 
// pointer cmove with one arm equal to null (immP0): use zr directly.

instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9475 
// Conditional move, narrow oop (32-bit cselw), signed comparison.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9491 
// Conditional move, narrow oop (32-bit cselw), unsigned comparison.
// Fix: the format annotation previously said "signed, compressed ptr",
// inconsistent with every other cmpOpU rule in this file; this only
// affects disassembly/debug output, not generated code.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    // cselw picks $src2 when the condition holds, otherwise $src1.
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9507 
9508 // special cases where one arg is zero
9509 
// narrow-oop cmove with one arm equal to null (immN0): use zr directly.

instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9573 
// Conditional move, float, signed comparison: fcsels picks $src2 when the
// condition holds, otherwise $src1 (same operand flip as the integer rules).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9591 
// Conditional move, float, unsigned comparison flavour.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9609 
// Conditional move, double, signed comparison: fcseld picks $src2 when the
// condition holds, otherwise $src1.
// Fix: the format annotation previously said "cmove float" although this
// rule matches CMoveD on vRegD operands and emits fcseld (double); this
// only affects disassembly/debug output, not generated code.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9627 
// Conditional move, double, unsigned comparison flavour.
// Fix: the format annotation previously said "cmove float" although this
// rule matches CMoveD on vRegD operands and emits fcseld (double); this
// only affects disassembly/debug output, not generated code.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9645 
9646 // ============================================================================
9647 // Arithmetic Instructions
9648 //
9649 
9650 // Integer Addition
9651 
9652 // TODO
9653 // these currently employ operations which do not set CR and hence are
9654 // not flagged as killing CR but we would like to isolate the cases
9655 // where we want to set flags from those where we don't. need to work
9656 // out how to do that.
9657 
// Integer addition, register + register (32-bit addw; does not set flags).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9672 
// Integer addition, register + add/sub-range immediate.  Both rules share
// the aarch64_enc_addsubw_imm encoding; opcode 0x0 marks "add" (as opposed
// to sub) for that shared encoding.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Same addition but with the first input arriving as ConvL2I of a long
// register; the 32-bit addw implicitly performs the narrowing.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9700 
9701 // Pointer Addition
// Pointer add, register + long offset: 64-bit ADD.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9716 
// Pointer add with a 32-bit offset: the ConvI2L is folded into the
// instruction's sxtw operand extension, so this is a single ADD.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9731 
// Pointer add with a scaled long index: base + (index << scale),
// emitted as a single lea with an LSL-scaled register operand.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9746 
// Pointer add with a sign-extended, scaled int index:
// base + (sxtw(index) << scale), folded into one lea (sxtw addressing).
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9761 
// ConvI2L folded with a left shift into a single SBFIZ (sign-extending
// bitfield insert). The field width is capped at 32 because the source
// value is only 32 bits wide.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9776 
9777 // Pointer Immediate Addition
9778 // n.b. this needs to be more expensive than using an indirect memory
9779 // operand
// Pointer add, register + add/sub-encodable long immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9793 
9794 // Long Addition
// 64-bit long add, register + register: dst = src1 + src2 (ADD).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9810 
// Long Immediate Addition.
// No constant pool entries required.
// 64-bit long add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9825 
9826 // Integer Subtraction
// 32-bit integer subtract, register - register: dst = src1 - src2 (SUBW).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9841 
9842 // Immediate Subtraction
// 32-bit integer subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9856 
9857 // Long Subtraction
// 64-bit long subtract, register - register: dst = src1 - src2 (SUB).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9873 
// Long Immediate Subtraction.
// No constant pool entries required.
// 64-bit long subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed missing space after the mnemonic ("sub$dst" -> "sub $dst");
  // this string is only used for PrintAssembly/debug output.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9888 
9889 // Integer Negation (special case for sub)
9890 
// 32-bit negate: dst = 0 - src (NEGW). The immI0 operand only anchors
// the SubI-from-zero pattern; no immediate is encoded.
// NOTE(review): cr is listed but no effect(KILL cr) is declared --
// consistent with the section TODO about flag handling; confirm intent.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9904 
9905 // Long Negation
9906 
// 64-bit negate: dst = 0 - src (NEG). The immL0 operand only anchors
// the SubL-from-zero pattern; no immediate is encoded.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9920 
9921 // Integer Multiply
9922 
// 32-bit integer multiply: dst = src1 * src2 (MULW).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9937 
// Signed 32x32 -> 64-bit multiply: a long multiply of two sign-extended
// ints collapses to a single SMULL.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9952 
9953 // Long Multiply
9954 
// 64-bit long multiply: dst = src1 * src2 (MUL).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9969 
// High 64 bits of a signed 64x64 multiply (SMULH).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed stray ", " before the tab in the format string (display only).
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9985 
9986 // Combined Integer Multiply & Add/Sub
9987 
// 32-bit multiply-add: dst = src3 + src1 * src2 (MADDW).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now says "maddw" to match the 32-bit maddw actually emitted
  // below (previously printed the 64-bit mnemonic "madd").
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10003 
// 32-bit multiply-subtract: dst = src3 - src1 * src2 (MSUBW).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format now says "msubw" to match the 32-bit msubw actually emitted
  // below (previously printed the 64-bit mnemonic "msub").
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10019 
10020 // Combined Integer Multiply & Neg
10021 
// 32-bit multiply-negate: dst = -(src1 * src2) (MNEGW). Two match rules
// cover the negation appearing on either multiplicand.
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));
  match(Set dst (MulI src1 (SubI zero src2)));

  ins_cost(INSN_COST * 3);
  // Format now says "mnegw" to match the 32-bit mnegw actually emitted
  // below (previously printed the 64-bit mnemonic "mneg").
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10037 
10038 // Combined Long Multiply & Add/Sub
10039 
// 64-bit multiply-add: dst = src3 + src1 * src2 (MADD).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10055 
// 64-bit multiply-subtract: dst = src3 - src1 * src2 (MSUB).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10071 
10072 // Combined Long Multiply & Neg
10073 
// 64-bit multiply-negate: dst = -(src1 * src2) (MNEG). Two match rules
// cover the negation appearing on either multiplicand.
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));
  match(Set dst (MulL src1 (SubL zero src2)));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10089 
10090 // Integer Divide
10091 
// 32-bit signed divide: dst = src1 / src2 (SDIVW via shared encoding).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10101 
// (x >> 31) >>> 31 extracts the int sign bit; a single LSRW #31 of the
// original value produces the same 0/1 result.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10111 
// Divide-by-two rounding adjustment: src + (src >>> 31) adds the sign
// bit, folded into one ADDW with an LSR #31 shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10125 
10126 // Long Divide
10127 
// 64-bit signed divide: dst = src1 / src2 (SDIV via shared encoding).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10137 
// (x >> 63) >>> 63 extracts the long sign bit; a single LSR #63 of the
// original value produces the same 0/1 result.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10147 
// Long divide-by-two rounding adjustment: src + (src >>> 63) adds the
// sign bit, folded into one ADD with an LSR #63 shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10161 
10162 // Integer Remainder
10163 
// 32-bit signed remainder, via sdivw + msubw (no hardware rem insn).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed garbled second line ("msubw($dst, ..." had a spurious paren);
  // the string only affects PrintAssembly/debug output.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10174 
10175 // Long Remainder
10176 
// 64-bit signed remainder, via sdiv + msub (no hardware rem insn).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed garbled second line ("msub($dst, ..." had a spurious paren)
  // and used "\n\t" for consistency with modI; display text only.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10187 
10188 // Integer Shifts
10189 
10190 // Shift Left Register
// 32-bit shift left by register amount (LSLVW).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10205 
10206 // Shift Left Immediate
// 32-bit shift left by immediate; shift count is masked to 0..31 as
// required by Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10221 
10222 // Shift Right Logical Register
// 32-bit logical shift right by register amount (LSRVW).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10237 
10238 // Shift Right Logical Immediate
// 32-bit logical shift right by immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10253 
10254 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by register amount (ASRVW).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10269 
10270 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10285 
10286 // Combined Int Mask and Right Shift (using UBFM)
10287 // TODO
10288 
10289 // Long Shifts
10290 
10291 // Shift Left Register
// 64-bit shift left by register amount (LSLV).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10306 
10307 // Shift Left Immediate
// 64-bit shift left by immediate; count masked to 0..63 per Java
// long shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10322 
10323 // Shift Right Logical Register
// 64-bit logical shift right by register amount (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10338 
10339 // Shift Right Logical Immediate
// 64-bit logical shift right by immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10354 
10355 // A special-case pattern for card table stores.
// Logical shift right of a pointer reinterpreted as a long (CastP2X);
// a special-case pattern for card table stores. Count masked to 0..63.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10370 
10371 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by register amount (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10386 
10387 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10402 
10403 // BEGIN This section of the file is automatically generated. Do not edit --------------
10404 
10405 instruct regL_not_reg(iRegLNoSp dst,
10406                          iRegL src1, immL_M1 m1,
10407                          rFlagsReg cr) %{
10408   match(Set dst (XorL src1 m1));
10409   ins_cost(INSN_COST);
10410   format %{ "eon  $dst, $src1, zr" %}
10411 
10412   ins_encode %{
10413     __ eon(as_Register($dst$$reg),
10414               as_Register($src1$$reg),
10415               zr,
10416               Assembler::LSL, 0);
10417   %}
10418 
10419   ins_pipe(ialu_reg);
10420 %}
10421 instruct regI_not_reg(iRegINoSp dst,
10422                          iRegIorL2I src1, immI_M1 m1,
10423                          rFlagsReg cr) %{
10424   match(Set dst (XorI src1 m1));
10425   ins_cost(INSN_COST);
10426   format %{ "eonw  $dst, $src1, zr" %}
10427 
10428   ins_encode %{
10429     __ eonw(as_Register($dst$$reg),
10430               as_Register($src1$$reg),
10431               zr,
10432               Assembler::LSL, 0);
10433   %}
10434 
10435   ins_pipe(ialu_reg);
10436 %}
10437 
10438 instruct AndI_reg_not_reg(iRegINoSp dst,
10439                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10440                          rFlagsReg cr) %{
10441   match(Set dst (AndI src1 (XorI src2 m1)));
10442   ins_cost(INSN_COST);
10443   format %{ "bicw  $dst, $src1, $src2" %}
10444 
10445   ins_encode %{
10446     __ bicw(as_Register($dst$$reg),
10447               as_Register($src1$$reg),
10448               as_Register($src2$$reg),
10449               Assembler::LSL, 0);
10450   %}
10451 
10452   ins_pipe(ialu_reg_reg);
10453 %}
10454 
10455 instruct AndL_reg_not_reg(iRegLNoSp dst,
10456                          iRegL src1, iRegL src2, immL_M1 m1,
10457                          rFlagsReg cr) %{
10458   match(Set dst (AndL src1 (XorL src2 m1)));
10459   ins_cost(INSN_COST);
10460   format %{ "bic  $dst, $src1, $src2" %}
10461 
10462   ins_encode %{
10463     __ bic(as_Register($dst$$reg),
10464               as_Register($src1$$reg),
10465               as_Register($src2$$reg),
10466               Assembler::LSL, 0);
10467   %}
10468 
10469   ins_pipe(ialu_reg_reg);
10470 %}
10471 
10472 instruct OrI_reg_not_reg(iRegINoSp dst,
10473                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10474                          rFlagsReg cr) %{
10475   match(Set dst (OrI src1 (XorI src2 m1)));
10476   ins_cost(INSN_COST);
10477   format %{ "ornw  $dst, $src1, $src2" %}
10478 
10479   ins_encode %{
10480     __ ornw(as_Register($dst$$reg),
10481               as_Register($src1$$reg),
10482               as_Register($src2$$reg),
10483               Assembler::LSL, 0);
10484   %}
10485 
10486   ins_pipe(ialu_reg_reg);
10487 %}
10488 
10489 instruct OrL_reg_not_reg(iRegLNoSp dst,
10490                          iRegL src1, iRegL src2, immL_M1 m1,
10491                          rFlagsReg cr) %{
10492   match(Set dst (OrL src1 (XorL src2 m1)));
10493   ins_cost(INSN_COST);
10494   format %{ "orn  $dst, $src1, $src2" %}
10495 
10496   ins_encode %{
10497     __ orn(as_Register($dst$$reg),
10498               as_Register($src1$$reg),
10499               as_Register($src2$$reg),
10500               Assembler::LSL, 0);
10501   %}
10502 
10503   ins_pipe(ialu_reg_reg);
10504 %}
10505 
10506 instruct XorI_reg_not_reg(iRegINoSp dst,
10507                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10508                          rFlagsReg cr) %{
10509   match(Set dst (XorI m1 (XorI src2 src1)));
10510   ins_cost(INSN_COST);
10511   format %{ "eonw  $dst, $src1, $src2" %}
10512 
10513   ins_encode %{
10514     __ eonw(as_Register($dst$$reg),
10515               as_Register($src1$$reg),
10516               as_Register($src2$$reg),
10517               Assembler::LSL, 0);
10518   %}
10519 
10520   ins_pipe(ialu_reg_reg);
10521 %}
10522 
10523 instruct XorL_reg_not_reg(iRegLNoSp dst,
10524                          iRegL src1, iRegL src2, immL_M1 m1,
10525                          rFlagsReg cr) %{
10526   match(Set dst (XorL m1 (XorL src2 src1)));
10527   ins_cost(INSN_COST);
10528   format %{ "eon  $dst, $src1, $src2" %}
10529 
10530   ins_encode %{
10531     __ eon(as_Register($dst$$reg),
10532               as_Register($src1$$reg),
10533               as_Register($src2$$reg),
10534               Assembler::LSL, 0);
10535   %}
10536 
10537   ins_pipe(ialu_reg_reg);
10538 %}
10539 
10540 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10541                          iRegIorL2I src1, iRegIorL2I src2,
10542                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10543   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10544   ins_cost(1.9 * INSN_COST);
10545   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10546 
10547   ins_encode %{
10548     __ bicw(as_Register($dst$$reg),
10549               as_Register($src1$$reg),
10550               as_Register($src2$$reg),
10551               Assembler::LSR,
10552               $src3$$constant & 0x1f);
10553   %}
10554 
10555   ins_pipe(ialu_reg_reg_shift);
10556 %}
10557 
10558 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10559                          iRegL src1, iRegL src2,
10560                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10561   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10562   ins_cost(1.9 * INSN_COST);
10563   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10564 
10565   ins_encode %{
10566     __ bic(as_Register($dst$$reg),
10567               as_Register($src1$$reg),
10568               as_Register($src2$$reg),
10569               Assembler::LSR,
10570               $src3$$constant & 0x3f);
10571   %}
10572 
10573   ins_pipe(ialu_reg_reg_shift);
10574 %}
10575 
10576 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10577                          iRegIorL2I src1, iRegIorL2I src2,
10578                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10579   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10580   ins_cost(1.9 * INSN_COST);
10581   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10582 
10583   ins_encode %{
10584     __ bicw(as_Register($dst$$reg),
10585               as_Register($src1$$reg),
10586               as_Register($src2$$reg),
10587               Assembler::ASR,
10588               $src3$$constant & 0x1f);
10589   %}
10590 
10591   ins_pipe(ialu_reg_reg_shift);
10592 %}
10593 
10594 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10595                          iRegL src1, iRegL src2,
10596                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10597   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10598   ins_cost(1.9 * INSN_COST);
10599   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10600 
10601   ins_encode %{
10602     __ bic(as_Register($dst$$reg),
10603               as_Register($src1$$reg),
10604               as_Register($src2$$reg),
10605               Assembler::ASR,
10606               $src3$$constant & 0x3f);
10607   %}
10608 
10609   ins_pipe(ialu_reg_reg_shift);
10610 %}
10611 
10612 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10613                          iRegIorL2I src1, iRegIorL2I src2,
10614                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10615   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10616   ins_cost(1.9 * INSN_COST);
10617   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10618 
10619   ins_encode %{
10620     __ bicw(as_Register($dst$$reg),
10621               as_Register($src1$$reg),
10622               as_Register($src2$$reg),
10623               Assembler::LSL,
10624               $src3$$constant & 0x1f);
10625   %}
10626 
10627   ins_pipe(ialu_reg_reg_shift);
10628 %}
10629 
10630 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10631                          iRegL src1, iRegL src2,
10632                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10633   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10634   ins_cost(1.9 * INSN_COST);
10635   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10636 
10637   ins_encode %{
10638     __ bic(as_Register($dst$$reg),
10639               as_Register($src1$$reg),
10640               as_Register($src2$$reg),
10641               Assembler::LSL,
10642               $src3$$constant & 0x3f);
10643   %}
10644 
10645   ins_pipe(ialu_reg_reg_shift);
10646 %}
10647 
10648 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10649                          iRegIorL2I src1, iRegIorL2I src2,
10650                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10651   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10652   ins_cost(1.9 * INSN_COST);
10653   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10654 
10655   ins_encode %{
10656     __ eonw(as_Register($dst$$reg),
10657               as_Register($src1$$reg),
10658               as_Register($src2$$reg),
10659               Assembler::LSR,
10660               $src3$$constant & 0x1f);
10661   %}
10662 
10663   ins_pipe(ialu_reg_reg_shift);
10664 %}
10665 
10666 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10667                          iRegL src1, iRegL src2,
10668                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10669   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10670   ins_cost(1.9 * INSN_COST);
10671   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10672 
10673   ins_encode %{
10674     __ eon(as_Register($dst$$reg),
10675               as_Register($src1$$reg),
10676               as_Register($src2$$reg),
10677               Assembler::LSR,
10678               $src3$$constant & 0x3f);
10679   %}
10680 
10681   ins_pipe(ialu_reg_reg_shift);
10682 %}
10683 
10684 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
10685                          iRegIorL2I src1, iRegIorL2I src2,
10686                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10687   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
10688   ins_cost(1.9 * INSN_COST);
10689   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
10690 
10691   ins_encode %{
10692     __ eonw(as_Register($dst$$reg),
10693               as_Register($src1$$reg),
10694               as_Register($src2$$reg),
10695               Assembler::ASR,
10696               $src3$$constant & 0x1f);
10697   %}
10698 
10699   ins_pipe(ialu_reg_reg_shift);
10700 %}
10701 
10702 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
10703                          iRegL src1, iRegL src2,
10704                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10705   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
10706   ins_cost(1.9 * INSN_COST);
10707   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
10708 
10709   ins_encode %{
10710     __ eon(as_Register($dst$$reg),
10711               as_Register($src1$$reg),
10712               as_Register($src2$$reg),
10713               Assembler::ASR,
10714               $src3$$constant & 0x3f);
10715   %}
10716 
10717   ins_pipe(ialu_reg_reg_shift);
10718 %}
10719 
// dst = -1 ^ ((src2 << src3) ^ src1).  The XOR with immI_M1 (-1) is a
// bitwise NOT, so the whole tree folds into a single EONW (XOR-NOT)
// taking src2 as a logical-shift-left shifted-register operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10737 
// dst = -1 ^ ((src2 << src3) ^ src1).  The XOR with immL_M1 (-1) is a
// bitwise NOT, so the whole tree folds into a single EON (XOR-NOT)
// taking src2 as a logical-shift-left shifted-register operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10755 
// dst = src1 | ((src2 >>> src3) ^ -1).  The XOR with immI_M1 (-1) is a
// bitwise NOT, so this folds into a single ORNW (OR-NOT) taking src2
// as a logical-shift-right shifted-register operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10773 
// dst = src1 | ((src2 >>> src3) ^ -1).  The XOR with immL_M1 (-1) is a
// bitwise NOT, so this folds into a single ORN (OR-NOT) taking src2
// as a logical-shift-right shifted-register operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10791 
// dst = src1 | ((src2 >> src3) ^ -1).  The XOR with immI_M1 (-1) is a
// bitwise NOT, so this folds into a single ORNW (OR-NOT) taking src2
// as an arithmetic-shift-right shifted-register operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10809 
// dst = src1 | ((src2 >> src3) ^ -1).  The XOR with immL_M1 (-1) is a
// bitwise NOT, so this folds into a single ORN (OR-NOT) taking src2
// as an arithmetic-shift-right shifted-register operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10827 
// dst = src1 | ((src2 << src3) ^ -1).  The XOR with immI_M1 (-1) is a
// bitwise NOT, so this folds into a single ORNW (OR-NOT) taking src2
// as a logical-shift-left shifted-register operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10845 
// dst = src1 | ((src2 << src3) ^ -1).  The XOR with immL_M1 (-1) is a
// bitwise NOT, so this folds into a single ORN (OR-NOT) taking src2
// as a logical-shift-left shifted-register operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10863 
// dst = src1 & (src2 >>> src3): the logical right shift is folded into
// ANDW's shifted-register operand — one instruction instead of two.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10882 
// dst = src1 & (src2 >>> src3): the logical right shift is folded into
// AND's shifted-register operand — one instruction instead of two.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10901 
// dst = src1 & (src2 >> src3): the arithmetic right shift is folded into
// ANDW's shifted-register operand — one instruction instead of two.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10920 
// dst = src1 & (src2 >> src3): the arithmetic right shift is folded into
// AND's shifted-register operand — one instruction instead of two.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10939 
// dst = src1 & (src2 << src3): the left shift is folded into ANDW's
// shifted-register operand — one instruction instead of two.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10958 
// dst = src1 & (src2 << src3): the left shift is folded into AND's
// shifted-register operand — one instruction instead of two.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10977 
// dst = src1 ^ (src2 >>> src3): the logical right shift is folded into
// EORW's shifted-register operand — one instruction instead of two.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10996 
// dst = src1 ^ (src2 >>> src3): the logical right shift is folded into
// EOR's shifted-register operand — one instruction instead of two.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11015 
// dst = src1 ^ (src2 >> src3): the arithmetic right shift is folded into
// EORW's shifted-register operand — one instruction instead of two.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11034 
// dst = src1 ^ (src2 >> src3): the arithmetic right shift is folded into
// EOR's shifted-register operand — one instruction instead of two.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11053 
// dst = src1 ^ (src2 << src3): the left shift is folded into EORW's
// shifted-register operand — one instruction instead of two.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11072 
// dst = src1 ^ (src2 << src3): the left shift is folded into EOR's
// shifted-register operand — one instruction instead of two.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11091 
// dst = src1 | (src2 >>> src3): the logical right shift is folded into
// ORRW's shifted-register operand — one instruction instead of two.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11110 
// dst = src1 | (src2 >>> src3): the logical right shift is folded into
// ORR's shifted-register operand — one instruction instead of two.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11129 
// dst = src1 | (src2 >> src3): the arithmetic right shift is folded into
// ORRW's shifted-register operand — one instruction instead of two.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11148 
// dst = src1 | (src2 >> src3): the arithmetic right shift is folded into
// ORR's shifted-register operand — one instruction instead of two.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11167 
// dst = src1 | (src2 << src3): the left shift is folded into ORRW's
// shifted-register operand — one instruction instead of two.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11186 
// dst = src1 | (src2 << src3): the left shift is folded into ORR's
// shifted-register operand — one instruction instead of two.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11205 
// dst = src1 + (src2 >>> src3): the logical right shift is folded into
// ADDW's shifted-register operand — one instruction instead of two.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11224 
// dst = src1 + (src2 >>> src3): the logical right shift is folded into
// ADD's shifted-register operand — one instruction instead of two.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11243 
// dst = src1 + (src2 >> src3): the arithmetic right shift is folded into
// ADDW's shifted-register operand — one instruction instead of two.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11262 
// dst = src1 + (src2 >> src3): the arithmetic right shift is folded into
// ADD's shifted-register operand — one instruction instead of two.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11281 
// dst = src1 + (src2 << src3): the left shift is folded into ADDW's
// shifted-register operand — one instruction instead of two.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11300 
// dst = src1 + (src2 << src3): the left shift is folded into ADD's
// shifted-register operand — one instruction instead of two.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11319 
// dst = src1 - (src2 >>> src3): the logical right shift is folded into
// SUBW's shifted-register operand — one instruction instead of two.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11338 
// dst = src1 - (src2 >>> src3): the logical right shift is folded into
// SUB's shifted-register operand — one instruction instead of two.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11357 
// dst = src1 - (src2 >> src3): the arithmetic right shift is folded into
// SUBW's shifted-register operand — one instruction instead of two.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11376 
// dst = src1 - (src2 >> src3): the arithmetic right shift is folded into
// SUB's shifted-register operand — one instruction instead of two.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11395 
// dst = src1 - (src2 << src3): the left shift is folded into SUBW's
// shifted-register operand — one instruction instead of two.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);  // 32-bit op: shift taken mod 32
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11414 
// dst = src1 - (src2 << src3): the left shift is folded into SUB's
// shifted-register operand — one instruction instead of two.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);  // 64-bit op: shift taken mod 64
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11433 
11434 
11435 
11436 // Shift Left followed by Shift Right.
11437 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapses into one SBFM:
// immr = (rshift - lshift) mod 64, imms = 63 - lshift, per the A64
// bitfield-move encoding.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;            // imms field
    int r = (rshift - lshift) & 63; // immr field
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11458 
11459 // Shift Left followed by Shift Right.
11460 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic, 32-bit) collapses into one
// SBFMW: immr = (rshift - lshift) mod 32, imms = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;            // imms field
    int r = (rshift - lshift) & 31; // immr field
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11481 
11482 // Shift Left followed by Shift Right.
11483 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical) collapses into one UBFM:
// immr = (rshift - lshift) mod 64, imms = 63 - lshift.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;            // imms field
    int r = (rshift - lshift) & 63; // immr field
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11504 
11505 // Shift Left followed by Shift Right.
11506 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >>> rshift (logical, 32-bit) collapses into one
// UBFMW: immr = (rshift - lshift) mod 32, imms = 31 - lshift.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;            // imms field
    int r = (rshift - lshift) & 31; // immr field
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11527 // Bitfield extract with shift & mask
11528 
// (src >>> rshift) & mask becomes a single UBFXW bitfield extract.
// immI_bitmask guarantees mask is of the form 2^k - 1, so the extract
// width is exact_log2(mask + 1).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits extracted
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// (src >>> rshift) & mask becomes a single UBFX bitfield extract.
// immL_bitmask guarantees mask is of the form 2^k - 1, so the extract
// width is exact_log2(mask + 1).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits extracted
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11559 
11560 // We can use ubfx when extending an And with a mask when we know mask
11561 // is positive.  We know that because immI_bitmask guarantees it.
// Same shift-and-mask extract, but with an I2L conversion on top: a
// 64-bit UBFX zero-fills the high bits, which matches ConvI2L of the
// non-negative masked value (immI_bitmask guarantees mask >= 0).
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits extracted
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11577 
11578 // We can use ubfiz when masking by a positive number and then left shifting the result.
11579 // We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift becomes a single UBFIZW (bitfield insert in
// zero).  The predicate ensures lshift + field width fit in 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits inserted
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11597 // We can use ubfiz when masking by a positive number and then left shifting the result.
11598 // We know that the mask is positive because immL_bitmask guarantees it.
// (src & mask) << lshift becomes a single UBFIZ (bitfield insert in
// zero).  The predicate ensures lshift + field width fit in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits inserted
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11616 
// If there is a convert-I-to-L block between an AndI and a LShiftL, we can also match ubfiz
// ((long)(src & mask)) << lshift also matches UBFIZ: the mask keeps the
// value non-negative (immI_bitmask), so ConvI2L zero-extends and the
// 64-bit insert-in-zero produces the same result.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // number of bits inserted
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11635 
11636 // Rotations
11637 
// Fuse (src1 << lshift) | (src2 >>> rshift) into one EXTR when
// lshift + rshift == 64 (checked by the predicate modulo 64).
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11652 
// 32-bit variant: fuse (src1 << lshift) | (src2 >>> rshift) into one EXTRW
// when lshift + rshift == 32 (checked by the predicate modulo 32).
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11667 
// Same fusion as extrOrL but for AddL: when lshift + rshift == 64 the shifted
// fields do not overlap, so add and or produce the same result and EXTR applies.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11682 
// 32-bit variant of extrAddL: non-overlapping fields make AddI equivalent to
// OrI, so the pair of shifts folds into a single EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11697 
11698 
11699 // rol expander
11700 
// 64-bit rotate-left expander: AArch64 has no ROL, so negate the shift
// (rotate amounts are taken modulo 64) and use RORV. Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11714 
11715 // rol expander
11716 
// 32-bit rotate-left expander: negate the shift (amounts taken modulo 32)
// and use RORVW. Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11730 
// Recognize (src << s) | (src >>> (64 - s)) — a 64-bit rotate left by a
// variable amount — and expand to the rol expander.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11739 
// Recognize (src << s) | (src >>> (0 - s)): since shift amounts are taken
// modulo 64, (0 - s) equals (64 - s), so this is also a 64-bit rotate left.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11748 
// Recognize (src << s) | (src >>> (32 - s)) — a 32-bit rotate left by a
// variable amount — and expand to the rol expander.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11757 
// Recognize (src << s) | (src >>> (0 - s)): modulo-32 shift semantics make
// (0 - s) equivalent to (32 - s), so this is also a 32-bit rotate left.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11766 
11767 // ror expander
11768 
// 64-bit rotate-right expander: maps directly onto a single RORV.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11781 
11782 // ror expander
11783 
// 32-bit rotate-right expander: maps directly onto a single RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11796 
// Recognize (src >>> s) | (src << (64 - s)) — a 64-bit rotate right by a
// variable amount — and expand to the ror expander.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11805 
// Recognize (src >>> s) | (src << (0 - s)): modulo-64 shift semantics make
// (0 - s) equivalent to (64 - s), so this is also a 64-bit rotate right.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11814 
// Recognize (src >>> s) | (src << (32 - s)) — a 32-bit rotate right by a
// variable amount — and expand to the ror expander.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11823 
// Recognize (src >>> s) | (src << (0 - s)): modulo-32 shift semantics make
// (0 - s) equivalent to (32 - s), so this is also a 32-bit rotate right.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11832 
11833 // Add/subtract (extended)
11834 
// long + (long)int: fold the ConvI2L into ADD's sxtw extended-register form.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11847 
// long - (long)int: fold the ConvI2L into SUB's sxtw extended-register form.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11860 
11861 
// (src2 << 16) >> 16 is a sign-extension of the low halfword; fold it into
// ADD's sxth extended-register form.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
11874 
// (src2 << 24) >> 24 is a sign-extension of the low byte; fold it into
// ADD's sxtb extended-register form.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11887 
// (src2 << 24) >>> 24 is a zero-extension of the low byte; fold it into
// ADD's uxtb extended-register form.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11900 
// 64-bit: (src2 << 48) >> 48 sign-extends the low halfword; fold into
// ADD with sxth extension.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
11913 
// 64-bit: (src2 << 32) >> 32 sign-extends the low word; fold into
// ADD with sxtw extension.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11926 
// 64-bit: (src2 << 56) >> 56 sign-extends the low byte; fold into
// ADD with sxtb extension.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11939 
// 64-bit: (src2 << 56) >>> 56 zero-extends the low byte; fold into
// ADD with uxtb extension.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11952 
11953 
// src2 & 0xFF zero-extends the low byte; fold into ADDW with uxtb extension.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11966 
// src2 & 0xFFFF zero-extends the low halfword; fold into ADDW with uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
11979 
// 64-bit: src2 & 0xFF zero-extends the low byte; fold into ADD with uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11992 
// 64-bit: src2 & 0xFFFF zero-extends the low halfword; fold into ADD with uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12005 
// 64-bit: src2 & 0xFFFFFFFF zero-extends the low word; fold into ADD with uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12018 
// src2 & 0xFF zero-extends the low byte; fold into SUBW with uxtb extension.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12031 
// src2 & 0xFFFF zero-extends the low halfword; fold into SUBW with uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12044 
// 64-bit: src2 & 0xFF zero-extends the low byte; fold into SUB with uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12057 
// 64-bit: src2 & 0xFFFF zero-extends the low halfword; fold into SUB with uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12070 
// 64-bit: src2 & 0xFFFFFFFF zero-extends the low word; fold into SUB with uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12083 
12084 
// Sign-extend the low byte of src2 (via the <<56 >>56 shift pair), shift it
// left by lshift2, and fold the whole thing into ADD's sxtb-with-shift form.
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12097 
// Sign-extend the low halfword of src2 (<<48 >>48), shift left by lshift2,
// and fold into ADD's sxth-with-shift form.
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12110 
// Sign-extend the low word of src2 (<<32 >>32), shift left by lshift2,
// and fold into ADD's sxtw-with-shift form.
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12123 
// Sign-extend the low byte of src2 (<<56 >>56), shift left by lshift2,
// and fold into SUB's sxtb-with-shift form.
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12136 
// Sign-extend the low halfword of src2 (<<48 >>48), shift left by lshift2,
// and fold into SUB's sxth-with-shift form.
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12149 
// Sign-extend the low word of src2 (<<32 >>32), shift left by lshift2,
// and fold into SUB's sxtw-with-shift form.
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12162 
// 32-bit: sign-extend the low byte of src2 (<<24 >>24), shift left by
// lshift2, and fold into ADDW's sxtb-with-shift form.
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12175 
// 32-bit: sign-extend the low halfword of src2 (<<16 >>16), shift left by
// lshift2, and fold into ADDW's sxth-with-shift form.
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12188 
// 32-bit: sign-extend the low byte of src2 (<<24 >>24), shift left by
// lshift2, and fold into SUBW's sxtb-with-shift form.
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12201 
// 32-bit: sign-extend the low halfword of src2 (<<16 >>16), shift left by
// lshift2, and fold into SUBW's sxth-with-shift form.
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12214 
12215 
// long + ((long)int << lshift): fold both the ConvI2L and the shift into
// ADD's sxtw-with-shift extended-register form.
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12228 
// long - ((long)int << lshift): fold both the ConvI2L and the shift into
// SUB's sxtw-with-shift extended-register form.
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
12241 
12242 
// long + ((src2 & 0xFF) << lshift): fold the byte zero-extension and the
// shift into ADD's uxtb-with-shift form.
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12255 
// long + ((src2 & 0xFFFF) << lshift): fold the halfword zero-extension and
// the shift into ADD's uxth-with-shift form.
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12268 
// long + ((src2 & 0xFFFFFFFF) << lshift): fold the word zero-extension and
// the shift into ADD's uxtw-with-shift form.
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12281 
// long - ((src2 & 0xFF) << lshift): fold the byte zero-extension and the
// shift into SUB's uxtb-with-shift form.
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12294 
// long - ((src2 & 0xFFFF) << lshift): fold the halfword zero-extension and
// the shift into SUB's uxth-with-shift form.
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12307 
// long - ((src2 & 0xFFFFFFFF) << lshift): fold the word zero-extension and
// the shift into SUB's uxtw-with-shift form.
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12320 
// 32-bit: src1 + ((src2 & 0xFF) << lshift), folded into ADDW's
// uxtb-with-shift form.
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12333 
// 32-bit: src1 + ((src2 & 0xFFFF) << lshift), folded into ADDW's
// uxth-with-shift form.
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12346 
// 32-bit: src1 - ((src2 & 0xFF) << lshift), folded into SUBW's
// uxtb-with-shift form.
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12359 
// 32-bit: src1 - ((src2 & 0xFFFF) << lshift), folded into SUBW's
// uxth-with-shift form.
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12372 // END This section of the file is automatically generated. Do not edit --------------
12373 
12374 // ============================================================================
12375 // Floating Point Arithmetic Instructions
12376 
// Single-precision floating-point add: FADDS.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12391 
// Double-precision floating-point add: FADDD.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12406 
// Single-precision floating-point subtract: FSUBS.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12421 
// Double-precision floating-point subtract: FSUBD.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12436 
// Single-precision floating-point multiply: FMULS.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12451 
// Double-precision floating-point multiply: FMULD.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12466 
12467 // src1 * src2 + src3
// src1 * src2 + src3
// Fused single-precision multiply-add (FMADDS); only matched when the FmaF
// node exists, i.e. UseFMA is enabled.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12483 
12484 // src1 * src2 + src3
// src1 * src2 + src3
// Fused double-precision multiply-add (FMADDD); requires UseFMA.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12500 
12501 // -src1 * src2 + src3
12502 instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12503   predicate(UseFMA);
12504   match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
12505   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
12506 
12507   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12508 
12509   ins_encode %{
12510     __ fmsubs(as_FloatRegister($dst$$reg),
12511               as_FloatRegister($src1$$reg),
12512               as_FloatRegister($src2$$reg),
12513               as_FloatRegister($src3$$reg));
12514   %}
12515 
12516   ins_pipe(pipe_class_default);
12517 %}
12518 
12519 // -src1 * src2 + src3
12520 instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12521   predicate(UseFMA);
12522   match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
12523   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
12524 
12525   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12526 
12527   ins_encode %{
12528     __ fmsubd(as_FloatRegister($dst$$reg),
12529               as_FloatRegister($src1$$reg),
12530               as_FloatRegister($src2$$reg),
12531               as_FloatRegister($src3$$reg));
12532   %}
12533 
12534   ins_pipe(pipe_class_default);
12535 %}
12536 
12537 // -src1 * src2 - src3
12538 instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12539   predicate(UseFMA);
12540   match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
12541   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
12542 
12543   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12544 
12545   ins_encode %{
12546     __ fnmadds(as_FloatRegister($dst$$reg),
12547                as_FloatRegister($src1$$reg),
12548                as_FloatRegister($src2$$reg),
12549                as_FloatRegister($src3$$reg));
12550   %}
12551 
12552   ins_pipe(pipe_class_default);
12553 %}
12554 
12555 // -src1 * src2 - src3
12556 instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12557   predicate(UseFMA);
12558   match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
12559   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
12560 
12561   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12562 
12563   ins_encode %{
12564     __ fnmaddd(as_FloatRegister($dst$$reg),
12565                as_FloatRegister($src1$$reg),
12566                as_FloatRegister($src2$$reg),
12567                as_FloatRegister($src3$$reg));
12568   %}
12569 
12570   ins_pipe(pipe_class_default);
12571 %}
12572 
12573 // src1 * src2 - src3
12574 instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12575   predicate(UseFMA);
12576   match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
12577 
12578   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12579 
12580   ins_encode %{
12581     __ fnmsubs(as_FloatRegister($dst$$reg),
12582                as_FloatRegister($src1$$reg),
12583                as_FloatRegister($src2$$reg),
12584                as_FloatRegister($src3$$reg));
12585   %}
12586 
12587   ins_pipe(pipe_class_default);
12588 %}
12589 
12590 // src1 * src2 - src3
12591 instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12592   predicate(UseFMA);
12593   match(Set dst (FmaD (NegD src3) (Binary src1 src2)));
12594 
12595   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12596 
12597   ins_encode %{
12598   // n.b. insn name should be fnmsubd
12599     __ fnmsub(as_FloatRegister($dst$$reg),
12600               as_FloatRegister($src1$$reg),
12601               as_FloatRegister($src2$$reg),
12602               as_FloatRegister($src3$$reg));
12603   %}
12604 
12605   ins_pipe(pipe_class_default);
12606 %}
12607 
12608 
// Math.max(FF)F
// Emitted as FMAXS; single instruction, no branch.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.min(FF)F
// Emitted as FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.max(DD)D
// Emitted as FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.min(DD)D
// Emitted as FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12664 
12665 
// Single-precision divide: dst = src1 / src2 (FDIVS).
// High ins_cost reflects the long latency of hardware divide.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2 (FDIVD).
// Costed higher than the single-precision form (32 vs 18 insns).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12695 
// Single-precision negate: dst = -src, emitted as FNEGS.
// Format string corrected from "fneg" to "fnegs" so the debug listing
// matches the instruction actually emitted (cf. negD's "fnegd" below).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12709 
// Double-precision negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision absolute value: dst = |src| (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value: dst = |src| (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12749 
// Double-precision square root: dst = sqrt(src) (FSQRTD).
// Pipeline class corrected from fp_div_s to fp_div_d: this is a
// double-precision op and belongs on the same divide/sqrt pipeline as
// fdivd (it was swapped with sqrtF's class).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12762 
// Single-precision square root, matched from the Java pattern
// (float)Math.sqrt((double)f), i.e. ConvD2F(SqrtD(ConvF2D src)),
// collapsed into a single FSQRTS.
// Pipeline class corrected from fp_div_d to fp_div_s: this is a
// single-precision op (cf. fdivs -> fp_div_s); it was swapped with
// sqrtD's class.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12775 
12776 // ============================================================================
12777 // Logical Instructions
12778 
12779 // Integer Logical Instructions
12780 
12781 // And Instructions
12782 
12783 
// Int bitwise AND, register-register: dst = src1 & src2 (ANDW).
// NOTE(review): the 'cr' operand is declared but there is no effect()
// clause and ANDW does not set flags — presumably vestigial; confirm.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12798 
// Int bitwise AND with a logical immediate: dst = src1 & imm.
// Format string corrected from "andsw" to "andw": the encoding emits
// the non-flag-setting ANDW, so the listing should not claim ANDS.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12813 
// Or Instructions

// Int bitwise OR, register-register: dst = src1 | src2 (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise OR with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Int bitwise XOR, register-register: dst = src1 ^ src2 (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int bitwise XOR with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12877 
12878 // Long Logical Instructions
12879 // TODO
12880 
// Long bitwise AND, register-register: dst = src1 & src2 (AND, 64-bit).
// Format comments corrected from "# int" to "# long" (copy-paste from
// the int rules); the emitted code is unchanged.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise AND with a logical immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12910 
// Or Instructions

// Long bitwise OR, register-register: dst = src1 | src2 (ORR, 64-bit).
// Format comments corrected from "# int" to "# long".
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise OR with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12942 
// Xor Instructions

// Long bitwise XOR, register-register: dst = src1 ^ src2 (EOR, 64-bit).
// Format comments corrected from "# int" to "# long".
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long bitwise XOR with a logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12974 
// Sign-extending int-to-long conversion: SBFM with imms=0, immr=31 is
// the SXTW alias, matching the format string.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int-to-long: ConvI2L masked with 0xffffffff collapses to a
// single UBFM (zero-extend) instead of sign-extend + and.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Long-to-int: a 32-bit register move discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Int-to-boolean (Conv2B): compare against zero, then cset gives
// 0 or 1. Clobbers the flags, hence the KILL cr effect.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer-to-boolean: same shape as convI2B but with a 64-bit compare.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// double -> float narrowing conversion (FCVT).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion (FCVT).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int: FCVTZS (round toward zero) into a 32-bit register.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long: FCVTZS into a 64-bit register.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float: signed convert from a 32-bit register (SCVTF).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float: signed convert from a 64-bit register.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int: FCVTZS into a 32-bit register.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long: FCVTZS into a 64-bit register.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double: signed convert from a 32-bit register.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double: signed convert from a 64-bit register.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13179 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These MoveX2Y rules reinterpret raw bits (e.g. Float.floatToRawIntBits)
// rather than converting values; stack variants load/store via sp+disp.

// Reinterpret a spilled float as an int: 32-bit load from the stack slot.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled int as a float: 32-bit FP load from the stack slot.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a spilled double as a long: 64-bit load from the stack slot.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled long as a double: 64-bit FP load from the stack slot.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a float as an int via a stack slot: 32-bit FP store.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret an int as a float via a stack slot: 32-bit integer store.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13289 
// Reinterpret a double as a long via a stack slot: 64-bit FP store.
// Format string corrected from "strd $dst, $src" to "strd $src, $dst":
// the encoding stores $src to the $dst stack slot, and every sibling
// store rule (strs/strw/str) lists the operands as "$src, $dst".
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13307 
// Reinterpret a long as a double via a stack slot: 64-bit integer store.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Bit-identical float -> int move between register files (FMOV).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-identical int -> float move between register files (FMOV).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bit-identical double -> long move between register files (FMOV).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-identical long -> double move between register files (FMOV).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13397 
// ============================================================================
// clearing of an array

// Variable-length array zeroing: delegates to the zero_words stub.
// The fixed R10/R11 register classes match the calling convention of
// zero_words; both are clobbered (USE_KILL).
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}

// Constant-length array zeroing, used only when the word count is below
// the BlockZeroingLowLimit threshold (see predicate); the count is
// passed as an immediate so only base is clobbered.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  predicate((u_int64_t)n->in(2)->get_long()
            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13432 
// ============================================================================
// Overflow Math Instructions

// Add/sub overflow checks set the flags via CMN/CMP (i.e. flag-setting
// adds/subtracts discarding the result); the consumer tests the V flag.

// Int add overflow check, register-register: CMNW computes op1 + op2
// and sets flags without writing a result register.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add overflow check, register-immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add overflow check, register-register.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check, register-immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int subtract overflow check, register-register.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check, register-immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check, register-register.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check, register-immediate. Emitted directly
// as SUBS with the zero register as destination (equivalent to CMP).
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int negate overflow check: 0 - op1 via CMPW against zr.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check: 0 - op1 via CMP against zr.
// NOTE(review): the zero operand is immI0 in a long rule (cf. immL0
// elsewhere); presumably matches the ideal graph's int 0 — confirm.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13565 
// Int multiply overflow check. Computes the full 64-bit product with
// SMULL, then compares it against its own 32-bit sign-extension: any
// difference (NE) means the product does not fit in an int. The final
// movw/cselw/cmpw sequence re-expresses that NE result as the V flag
// (0x80000000 - 1 sets VS), which is what OverflowMulI consumers test.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form of the int multiply overflow check feeding a branch: when
// the flag result is consumed directly by an overflow/no_overflow If
// (see predicate), skip the V-flag materialization and branch on NE/EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check. MUL gives the low 64 bits, SMULH the
// high 64; overflow iff the high half is not the sign-extension of the
// low half (compare against low >> 63). The tail converts NE to VS the
// same way as overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13631 
13632 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13633 %{
13634   match(If cmp (OverflowMulL op1 op2));
13635   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13636             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13637   effect(USE labl, KILL cr);
13638 
13639   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13640             "smulh rscratch2, $op1, $op2\n\t"
13641             "cmp   rscratch2, rscratch1, ASR #63\n\t"
13642             "b$cmp $labl" %}
13643   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
13644   ins_encode %{
13645     Label* L = $labl$$label;
13646     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13647     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13648     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13649     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
13650     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13651   %}
13652 
13653   ins_pipe(pipe_serial);
13654 %}
13655 
13656 // ============================================================================
13657 // Compare Instructions
13658 
// Signed int compare, register-register: cmpw (32-bit compare) sets flags.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against constant zero, via the add/sub-immediate
// encoding (zero is always a valid add/sub immediate).
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate encodable in a single
// add/sub-immediate instruction (immIAddSub).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costed at two
// instructions because the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13714 
13715 // Unsigned compare Instructions; really, same as signed compare
13716 // except it should only be used to feed an If or a CMovI which takes a
13717 // cmpOpU.
13718 
// Unsigned int compare, register-register.  Emits the same cmpw as the
// signed variant; the difference is only that the result is typed as
// rFlagsRegU so consumers use unsigned condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (constant may need
// materializing, hence double cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13774 
// Signed long compare, register-register: 64-bit cmp sets flags.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against constant zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may need
// materializing, hence double cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13830 
// Unsigned long compare, register-register.  Same cmp encoding as the
// signed variant; only the flags-register type (rFlagsRegU) differs so
// that consumers select unsigned condition codes.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against constant zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate (constant may need
// materializing, hence double cost).
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13886 
// Pointer compare, register-register.  Pointers compare unsigned, hence
// rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test: compare a pointer register against constant NULL.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test against constant narrow NULL.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13942 
13943 // FP comparisons
13944 //
13945 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13946 // using normal cmpOp. See declaration of rFlagsReg for details.
13947 
// Single-precision float compare, register-register: fcmps sets the
// integer condition flags (see the CmpF note above about using normal
// cmpOp consumers).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13961 
// Single-precision float compare against constant zero.  Matched only
// when the immediate is +0.0f (immF0), so the fcmps-with-#0.0 form can
// be used and no second FP register is needed.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the previous "0.0D" spelling is a Java-style
    // suffix accepted only as a GNU extension, not standard C++.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13975 // FROM HERE
13976 
// Double-precision float compare, register-register: fcmpd sets the
// integer condition flags.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13990 
// Double-precision float compare against constant zero.  Matched only
// when the immediate is +0.0d (immD0), so the fcmpd-with-#0.0 form can
// be used and no second FP register is needed.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0 literal: the previous "0.0D" spelling is a Java-style
    // suffix accepted only as a GNU extension, not standard C++.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14004 
// Three-way float compare (CmpF3): produce -1/0/+1 in an int register,
// with unordered treated as less (-1), matching Java Float.compare when
// NaN loses.  csinv yields 0 on EQ else -1; csneg then keeps -1 on
// LT/unordered else negates it to +1.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the unbalanced parentheses in the old format text; this string
  // is debug-disassembly output only.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // The unused "Label done" / bind(done) pair was dead code; removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14032 
// Three-way double compare (CmpD3): produce -1/0/+1 in an int register,
// with unordered treated as less (-1).  Same csinv/csneg idiom as
// compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the unbalanced parentheses in the old format text; this string
  // is debug-disassembly output only.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // The unused "Label done" / bind(done) pair was dead code; removed.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14059 
// Three-way float compare against constant zero: -1/0/+1 in an int
// register, unordered treated as less.  Uses the fcmps-with-#0.0 form so
// no second FP register is needed.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the unbalanced parentheses in the old format text; this string
  // is debug-disassembly output only.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Dead "Label done" removed.  Plain 0.0 literal replaces the
    // non-standard Java-style "0.0D" suffix (GNU extension only).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
14086 
// Three-way double compare against constant zero: -1/0/+1 in an int
// register, unordered treated as less.  Uses the fcmpd-with-#0.0 form so
// no second FP register is needed.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed the unbalanced parentheses in the old format text; this string
  // is debug-disassembly output only.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Dead "Label done" removed.  Plain 0.0 literal replaces the
    // non-standard Java-style "0.0D" suffix (GNU extension only).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14112 
// CmpLTMask: dst = (p < q) ? -1 : 0 (signed).  csetw materializes 0/1
// from the LT condition, then subw from zr negates 1 into the all-ones
// mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: (src < 0) ? -1 : 0 is just an arithmetic
// right shift of the sign bit across all 32 bits.  No flags needed, but
// cr is still killed to keep the match rule's shape.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14149 
14150 // ============================================================================
14151 // Max and Min
14152 
// Signed int minimum: compare then conditional-select src1 when LT,
// else src2.  Two fixed-size instructions (size(8)).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: same compare/csel pattern as minI_rReg but selects
// src1 when GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14203 
14204 // ============================================================================
14205 // Branch Instructions
14206 
14207 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
// Branch on a signed condition in the flags register.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Same as branchCon but the condition operand carries unsigned codes.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14263 
14264 // Make use of CBZ and CBNZ.  These instructions, as well as being
14265 // shorter than (cmp; branch), have the additional benefit of not
14266 // killing the flags.
14267 
// Compare-int-with-zero-and-branch fused into cbzw/cbnzw.  Only eq/ne
// conditions are matched (cmpOpEqNe), so flags are never actually read
// or written, but cr is listed to preserve the If's shape.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-long-with-zero-and-branch fused into cbz/cbnz (64-bit forms).
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check-and-branch fused into cbz/cbnz.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop null-check-and-branch fused into cbzw/cbnzw (32-bit forms).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a DecodeN'd narrow oop: the decoded pointer is NULL iff
// the narrow oop is zero, so test the 32-bit register directly and skip
// the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14352 
// Unsigned compare-with-zero-and-branch fused into cbzw/cbnzw.  Against
// zero, u <= 0 (LS) is equivalent to u == 0 and u > 0 (HI) to u != 0.
// NOTE(review): the operand type is named ...LtGe but the encoding tests
// LS; for unsigned zero u < 0 is never true and u >= 0 always true, so
// this presumably relies on C2 normalizing/folding those conditions
// before matching — confirm against the cmpOpUEqNeLtGe operand
// definition earlier in this file.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit variant of the above, using cbz/cbnz.  Same NOTE(review) about
// the LS test vs the LtGe operand name applies here.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14386 
14387 // Test bit and Branch
14388 
14389 // Patterns for short (< 32KiB) variants
// Sign test of a long fused into a test-bit branch on bit 63.
// lt (sign set) maps to NE and ge to EQ; MacroAssembler::tbr selects
// tbnz/tbz from that condition (confirm against macroAssembler_aarch64).
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int fused into a test-bit branch on bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long ((op1 & (1<<k)) ==/!= 0) fused into tbz/tbnz.
// The predicate guarantees the AND mask is a power of two, so exact_log2
// recovers the bit index.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int fused into tbz/tbnz; see cmpL_branch_bit.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14455 
14456 // And far variants
// Far (>= 32KiB offset) variant of cmpL_branch_sign: same test-bit logic
// but tbr is told to emit a far-capable branch sequence.  No
// ins_short_branch, so this is the first-pass match.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (sign bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit (single power-of-two bit test on long).
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit (single power-of-two bit test on int).
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14518 
14519 // Test bits
14520 
// (op1 & const) compared with zero, folded into a single 64-bit tst when
// the constant is encodable as a logical immediate (per the predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14533 
// (op1 & const) compared with zero, folded into a single 32-bit tstw when
// the constant is encodable as a 32-bit logical immediate (per the
// predicate).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Print "tstw": the 32-bit form is what is actually emitted below
  // (the old format said "tst", disagreeing with sibling cmpI_and_reg).
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14546 
// (op1 & op2) compared with zero, register-register, 64-bit tst.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// (op1 & op2) compared with zero, register-register, 32-bit tstw.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14568 
14569 
14570 // Conditional Far Branch
14571 // Conditional Far Branch Unsigned
14572 // TODO: fixme
14573 
14574 // counted loop end branch near
// Counted-loop back branch, signed condition.  Uses the same conditional
// branch encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted-loop back branch, unsigned condition.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14607 
14608 // counted loop end branch far
14609 // counted loop end branch far unsigned
14610 // TODO: fixme
14611 
14612 // ============================================================================
14613 // inlined locking and unlocking
14614 
// Inline monitor enter fast path.  The encoding class emits the actual
// lock sequence; tmp/tmp2 are scratch and are clobbered, and the result
// is communicated through the flags register.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inline monitor exit fast path; mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14642 
14643 
14644 // ============================================================================
14645 // Safepoint Instructions
14646 
14647 // TODO
14648 // provide a near and far version of this code
14649 
instruct safePoint(iRegP poll)
%{
  // GC safepoint poll: a load from the polling page pointed to by $poll.
  // When the VM arms the page the load faults, diverting this thread to
  // the safepoint handler.
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    // Load is discarded (target is zr); relocInfo::poll_type marks the
    // pc as a poll site for the signal handler.
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14662 
14663 
14664 // ============================================================================
14665 // Procedure Call/Return Instructions
14666 
14667 // Call Java Static Instruction
14668 
instruct CallStaticJavaDirect(method meth)
%{
  // Direct call to a statically bound Java method.
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  // Static-call encoding followed by the shared call epilog.
  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14684 
14685 // TO HERE
14686 
14687 // Call Java Dynamic Instruction
instruct CallDynamicJavaDirect(method meth)
%{
  // Call to a dynamically bound Java method (virtual/interface dispatch
  // handled by the dynamic-call encoding).
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  // Dynamic-call encoding followed by the shared call epilog.
  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14703 
14704 // Call Runtime Instruction
14705 
instruct CallRuntimeDirect(method meth)
%{
  // Call from compiled Java code into a VM runtime entry point.
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14720 
14721 // Call Runtime Instruction
14722 
instruct CallLeafDirect(method meth)
%{
  // Leaf runtime call (CallLeaf node); uses the same java_to_runtime
  // encoding as CallRuntimeDirect above.
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14737 
14738 // Call Runtime Instruction
14739 
instruct CallLeafNoFPDirect(method meth)
%{
  // Leaf runtime call, no-FP flavor (CallLeafNoFP node); encoding is
  // identical to CallLeafDirect.
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14754 
14755 // Tail Call; Jump from runtime stub to Java code.
14756 // Also known as an 'interprocedural jump'.
14757 // Target of jump will eventually return to caller.
14758 // TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  // Interprocedural jump from a runtime stub into Java code; the method
  // oop travels in the inline-cache register and the target eventually
  // returns to the original caller.
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14771 
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  // Tail jump carrying an exception oop (pinned to r0); unlike TailCall
  // this discards the return address (see comment above TailCalljmpInd).
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14784 
14785 // Create exception oop: created by stack-crawling runtime code.
14786 // Created exception is now available to this handler, and is setup
14787 // just prior to jumping to this handler. No code emitted.
14788 // TODO check
14789 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  // Pseudo-instruction: the exception oop is already in r0 when control
  // reaches the handler, so this emits no code (size 0) and merely tells
  // the register allocator where the value lives.
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14802 
14803 // Rethrow exception: The exception oop will come in the first
14804 // argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  // Rethrow: exception oop arrives in the first argument register; this
  // JUMPS (does not call) to the rethrow stub.
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14815 
14816 
14817 // Return Instruction
14818 // epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  // Method return; the epilog has already restored lr, so a plain ret
  // suffices.
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14829 
14830 // Die now.
instruct ShouldNotReachHere() %{
  // Halt node: emit a trapping instruction for paths that must never
  // execute.
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
14845 
14846 // ============================================================================
14847 // Partial Subtype Check
14848 //
// Search for 'sub' in the superklass's secondary-supers array.  Set a hidden
14850 // internal cache on a hit (cache is checked with exposed code in
14851 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14852 // encoding ALSO sets flags.
14853 
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  // Slow-path subtype check (operands pinned to fixed registers for the
  // shared stub).  result is zero on a hit, non-zero on a miss; flags
  // are set as a side effect (see header comment above).
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14868 
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  // Fused form matched when the subtype-check result is immediately
  // compared against zero: only the flags are live afterwards, so the
  // result register need not be zeroed on a hit (opcode 0x0 below).
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14883 
// String.compareTo intrinsics.  One instruct per StrIntrinsicNode
// encoding: UU = both UTF-16, LL = both Latin-1, UL/LU = mixed (the
// mixed forms additionally need SIMD temporaries v0-v2).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1/Latin-1 compare; no SIMD temps needed (fnoreg passed below).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed UTF-16/Latin-1 compare; kills SIMD temps v0-v2 as well.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed Latin-1/UTF-16 compare (mirror of string_compareUL).
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
14958 
// String.indexOf intrinsics with a runtime substring length.  The -1
// passed as icnt2 tells MacroAssembler::string_indexof that cnt2 is not
// a compile-time constant.  One instruct per encoding (UU/LL/UL).
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1/Latin-1 indexOf, runtime count.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed UTF-16 haystack / Latin-1 needle indexOf, runtime count.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15021 
// String.indexOf intrinsics specialized for a small compile-time
// constant substring length (immI_le_4: <= 4; the UL form only handles
// length 1 via immI_1).  The constant is passed as icnt2 and cnt2 is
// replaced by zr; fewer temps are needed than the runtime-count forms.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1/Latin-1, constant needle length <= 4.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed UL, constant needle length of exactly 1 (immI_1).
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
15084 
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  // indexOf of a single char 'ch' within a UTF-16 string.
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15102 
// String.equals intrinsics; the trailing integer argument to
// string_equals is the element size in bytes (1 = Latin-1, 2 = UTF-16).
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 variant (element size 2).
instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15134 
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  // Arrays.equals for byte[] (LL encoding).
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fixed: 'ary2' was missing its '$', so PrintOptoAssembly printed the
  // literal text "ary2" instead of the register assigned to the operand.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Trailing argument is the element size in bytes (1 = byte elements).
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
15151 
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  // Arrays.equals for char[] (UU encoding).
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  // Fixed: 'ary2' was missing its '$', so PrintOptoAssembly printed the
  // literal text "ary2" instead of the register assigned to the operand.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    // Trailing argument is the element size in bytes (2 = char elements).
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
15168 
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  // Intrinsic for detecting bytes with the high bit set in a byte[]
  // (used by string coders to test for non-ASCII content).
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15179 
15180 // fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  // char[] -> byte[] compression intrinsic; uses SIMD temps v0-v3.
  // NOTE(review): the format text says "KILL R1, R2, R3, R4" but the
  // fixed operands here are R0-R3 plus v0-v3 — verify against callers.
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
15198 
15199 // fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  // byte[] -> char[] inflation intrinsic.  'dummy' (Universe) means the
  // node produces no value; the copy happens purely as a side effect.
  // NOTE(review): format under-reports the temps — tmp3/tmp4 are also
  // clobbered per the effect() list.
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15213 
15214 // encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  // ISO-8859-1 encoding intrinsic: narrows char[] data into byte[];
  // result receives the count produced by the encoder.
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15232 
15233 // ============================================================================
15234 // This name is KNOWN by the ADLC and cannot be changed.
15235 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15236 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  // ThreadLocal simply materializes the dedicated thread register; no
  // code is emitted (size 0, cost 0) — the name is fixed by the ADLC.
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15251 
15252 // ====================VECTOR INSTRUCTIONS=====================================
15253 
15254 // Load vector (32 bits)
// Vector loads, selected by the LoadVector node's memory size
// (4/8/16 bytes -> ldrs/ldrd/ldrq into an S/D/Q FP register).
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15286 
15287 // Store Vector (32 bits)
// Vector stores, selected by the StoreVector node's memory size
// (4/8/16 bytes -> strs/strd/strq from an S/D/Q FP register).
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15319 
// ReplicateB (byte splat): register-source variants use DUP; constant
// variants use MOVI with the immediate masked to a byte.  The 8B forms
// also cover 4-byte vectors held in a D register (predicate below).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 16-byte splat into a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Constant byte splat (D register).
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Constant byte splat (Q register).
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15369 
// ReplicateS (16-bit/short splat), arranged as T4H/T8H; constant forms
// mask the immediate to 16 bits.  The 4S forms also cover length-2
// vectors in a D register (predicate below).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 8-halfword splat into a Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Constant halfword splat (D register).
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Constant halfword splat (Q register).
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15419 
// ReplicateI (32-bit int splat), arranged as T2S (D reg) or T4S (Q reg).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 4-int splat into a Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Constant int splat (D register).
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Constant int splat (Q register).
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15467 
15468 instruct replicate2L(vecX dst, iRegL src)
15469 %{
15470   predicate(n->as_Vector()->length() == 2);
15471   match(Set dst (ReplicateL src));
15472   ins_cost(INSN_COST);
15473   format %{ "dup  $dst, $src\t# vector (2L)" %}
15474   ins_encode %{
15475     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
15476   %}
15477   ins_pipe(vdup_reg_reg128);
15478 %}
15479 
// Zero a 2-lane long vector.  A zero ReplicateI is matched here because the
// all-zero bit pattern is identical regardless of lane size; the encoding
// uses the eor-with-self idiom to clear the whole 128-bit register.
// NOTE: the format text previously claimed "movi ... # vector(4I)" which did
// not match the eor actually emitted; corrected to reflect the encoding.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "eor  $dst, $dst, $dst\t# vector (2L) zero" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15493 
// Replicate an FP register's S element into both S lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Replicate an FP register's S element into all four S lanes (128-bit).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Replicate an FP register's D element into both D lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15532 
15533 // ====================REDUCTION ARITHMETIC====================================
15534 
// Add-reduce a 2-lane int vector into a scalar: extract both S lanes with
// umov, then accumulate them into the scalar input $src1 with 32-bit adds.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-lane int vector: addv folds all four S lanes in the vector
// unit, then the folded value is extracted and added to the scalar $src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15571 
// Multiply-reduce a 2-lane int vector into a scalar: extract each S lane
// with umov and fold it into the running product, seeded from $src1.
// Fix: dropped the stray trailing "\n\t" from the format string, which
// produced a dangling blank continuation line in debug/disassembly output.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15590 
// Multiply-reduce a 4-lane int vector: move the upper D half down with ins,
// multiply lane-wise as 2S (giving two partial products), then extract both
// partials and fold them into the scalar $src1 with scalar multiplies.
// Fix: dropped the stray trailing "\n\t" from the format string, which
// produced a dangling blank continuation line in debug/disassembly output.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15615 
// Add-reduce a 2-lane float vector: fold lane 0 into $src1, then shift lane 1
// down with ins and add it.  Scalar fadds are used lane-by-lane so the
// additions happen in a fixed order (FP add is not associative).
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-lane float vector: lanes 1..3 are each moved to lane 0 with
// ins and accumulated with scalar fadds, preserving strict lane order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15667 
// Multiply-reduce a 2-lane float vector: fold lane 0 into $src1, then shift
// lane 1 down with ins and multiply it in, using scalar fmuls throughout.
// Fix: format string tag said "add reduction4f" for a 2-lane multiply
// reduction; corrected to "mul reduction2f".
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15687 
// Multiply-reduce a 4-lane float vector: lanes 1..3 are each moved to lane 0
// with ins and folded in with scalar fmuls, preserving strict lane order.
// Fix: format string tag said "add reduction4f" for a multiply reduction;
// corrected to "mul reduction4f".
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15719 
// Add-reduce a 2-lane double vector: fold lane 0 into $src1, then shift
// lane 1 down with ins and add it, using scalar faddd lane-by-lane.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15739 
// Multiply-reduce a 2-lane double vector: fold lane 0 into $src1, then shift
// lane 1 down with ins and multiply it in, using scalar fmuld lane-by-lane.
// Fix: format string tag said "add reduction2d" for a multiply reduction;
// corrected to "mul reduction2d".
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15759 
15760 // ====================VECTOR ARITHMETIC=======================================
15761 
15762 // --------------------------------- ADD --------------------------------------
15763 
// Vector add, 8 bytes (64-bit); also matches 4-byte vectors.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 16 bytes (128-bit).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 4 shorts (64-bit); also matches 2-short vectors.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 8 shorts (128-bit).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 ints (64-bit).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector add, 4 ints (128-bit).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 longs (128-bit).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector add, 2 floats (64-bit).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector add, 4 floats (128-bit).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15891 
// Vector add, 2 doubles (128-bit).
// Fix: added the length-2 predicate for consistency with the sibling
// vsub2D/vmul2D rules (which both carry it); vecX holds exactly two
// doubles, so this does not narrow the set of matchable AddVD nodes.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15904 
15905 // --------------------------------- SUB --------------------------------------
15906 
// Vector subtract, 8 bytes (64-bit); also matches 4-byte vectors.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 16 bytes (128-bit).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 4 shorts (64-bit); also matches 2-short vectors.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 8 shorts (128-bit).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 ints (64-bit).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Vector subtract, 4 ints (128-bit).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 longs (128-bit).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Vector subtract, 2 floats (64-bit).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Vector subtract, 4 floats (128-bit).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Vector subtract, 2 doubles (128-bit).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
16048 
16049 // --------------------------------- MUL --------------------------------------
16050 
// Vector multiply, 4 shorts (64-bit); also matches 2-short vectors.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 8 shorts (128-bit).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 ints (64-bit).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Vector multiply, 4 ints (128-bit).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Vector multiply, 2 floats (64-bit).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Vector multiply, 4 floats (128-bit).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Vector multiply, 2 doubles (128-bit).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16149 
16150 // --------------------------------- MLA --------------------------------------
16151 
// Integer multiply-accumulate, 4 shorts (64-bit): dst += src1 * src2.
// Also matches 2-short vectors.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 8 shorts (128-bit): dst += src1 * src2.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-accumulate, 2 ints (64-bit): dst += src1 * src2.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-accumulate, 4 ints (128-bit): dst += src1 * src2.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst + src1 * src2 — fused FP multiply-add, 2 floats; requires UseFMA
// since FmaVF nodes are only generated when FMA is enabled.
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst + src1 * src2 — fused FP multiply-add, 4 floats; requires UseFMA.
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst + src1 * src2 — fused FP multiply-add, 2 doubles; requires UseFMA.
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary src1 src2)));
  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16250 
16251 // --------------------------------- MLS --------------------------------------
16252 
// Integer multiply-subtract, 4 shorts (64-bit): dst -= src1 * src2.
// Also matches 2-short vectors.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 8 shorts (128-bit): dst -= src1 * src2.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Integer multiply-subtract, 2 ints (64-bit): dst -= src1 * src2.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Integer multiply-subtract, 4 ints (128-bit): dst -= src1 * src2.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// dst - src1 * src2 — fused FP multiply-subtract, 2 floats; requires UseFMA.
// Both negation placements (NegVF on either multiplicand) map to fmls.
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// dst - src1 * src2 — fused FP multiply-subtract, 4 floats; requires UseFMA.
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// dst - src1 * src2 — fused FP multiply-subtract, 2 doubles; requires UseFMA.
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16354 
16355 // --------------------------------- DIV --------------------------------------
16356 
16357 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
16358 %{
16359   predicate(n->as_Vector()->length() == 2);
16360   match(Set dst (DivVF src1 src2));
16361   ins_cost(INSN_COST);
16362   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
16363   ins_encode %{
16364     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
16365             as_FloatRegister($src1$$reg),
16366             as_FloatRegister($src2$$reg));
16367   %}
16368   ins_pipe(vmuldiv_fp64);
16369 %}
16370 
16371 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
16372 %{
16373   predicate(n->as_Vector()->length() == 4);
16374   match(Set dst (DivVF src1 src2));
16375   ins_cost(INSN_COST);
16376   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
16377   ins_encode %{
16378     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
16379             as_FloatRegister($src1$$reg),
16380             as_FloatRegister($src2$$reg));
16381   %}
16382   ins_pipe(vmuldiv_fp128);
16383 %}
16384 
16385 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16386 %{
16387   predicate(n->as_Vector()->length() == 2);
16388   match(Set dst (DivVD src1 src2));
16389   ins_cost(INSN_COST);
16390   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16391   ins_encode %{
16392     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16393             as_FloatRegister($src1$$reg),
16394             as_FloatRegister($src2$$reg));
16395   %}
16396   ins_pipe(vmuldiv_fp128);
16397 %}
16398 
16399 // --------------------------------- SQRT -------------------------------------
16400 
16401 instruct vsqrt2D(vecX dst, vecX src)
16402 %{
16403   predicate(n->as_Vector()->length() == 2);
16404   match(Set dst (SqrtVD src));
16405   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
16406   ins_encode %{
16407     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
16408              as_FloatRegister($src$$reg));
16409   %}
16410   ins_pipe(vsqrt_fp128);
16411 %}
16412 
16413 // --------------------------------- ABS --------------------------------------
16414 
16415 instruct vabs2F(vecD dst, vecD src)
16416 %{
16417   predicate(n->as_Vector()->length() == 2);
16418   match(Set dst (AbsVF src));
16419   ins_cost(INSN_COST * 3);
16420   format %{ "fabs  $dst,$src\t# vector (2S)" %}
16421   ins_encode %{
16422     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
16423             as_FloatRegister($src$$reg));
16424   %}
16425   ins_pipe(vunop_fp64);
16426 %}
16427 
16428 instruct vabs4F(vecX dst, vecX src)
16429 %{
16430   predicate(n->as_Vector()->length() == 4);
16431   match(Set dst (AbsVF src));
16432   ins_cost(INSN_COST * 3);
16433   format %{ "fabs  $dst,$src\t# vector (4S)" %}
16434   ins_encode %{
16435     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
16436             as_FloatRegister($src$$reg));
16437   %}
16438   ins_pipe(vunop_fp128);
16439 %}
16440 
16441 instruct vabs2D(vecX dst, vecX src)
16442 %{
16443   predicate(n->as_Vector()->length() == 2);
16444   match(Set dst (AbsVD src));
16445   ins_cost(INSN_COST * 3);
16446   format %{ "fabs  $dst,$src\t# vector (2D)" %}
16447   ins_encode %{
16448     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
16449             as_FloatRegister($src$$reg));
16450   %}
16451   ins_pipe(vunop_fp128);
16452 %}
16453 
16454 // --------------------------------- NEG --------------------------------------
16455 
16456 instruct vneg2F(vecD dst, vecD src)
16457 %{
16458   predicate(n->as_Vector()->length() == 2);
16459   match(Set dst (NegVF src));
16460   ins_cost(INSN_COST * 3);
16461   format %{ "fneg  $dst,$src\t# vector (2S)" %}
16462   ins_encode %{
16463     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
16464             as_FloatRegister($src$$reg));
16465   %}
16466   ins_pipe(vunop_fp64);
16467 %}
16468 
16469 instruct vneg4F(vecX dst, vecX src)
16470 %{
16471   predicate(n->as_Vector()->length() == 4);
16472   match(Set dst (NegVF src));
16473   ins_cost(INSN_COST * 3);
16474   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16475   ins_encode %{
16476     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16477             as_FloatRegister($src$$reg));
16478   %}
16479   ins_pipe(vunop_fp128);
16480 %}
16481 
16482 instruct vneg2D(vecX dst, vecX src)
16483 %{
16484   predicate(n->as_Vector()->length() == 2);
16485   match(Set dst (NegVD src));
16486   ins_cost(INSN_COST * 3);
16487   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16488   ins_encode %{
16489     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16490             as_FloatRegister($src$$reg));
16491   %}
16492   ins_pipe(vunop_fp128);
16493 %}
16494 
16495 // --------------------------------- AND --------------------------------------
16496 
16497 instruct vand8B(vecD dst, vecD src1, vecD src2)
16498 %{
16499   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16500             n->as_Vector()->length_in_bytes() == 8);
16501   match(Set dst (AndV src1 src2));
16502   ins_cost(INSN_COST);
16503   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16504   ins_encode %{
16505     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16506             as_FloatRegister($src1$$reg),
16507             as_FloatRegister($src2$$reg));
16508   %}
16509   ins_pipe(vlogical64);
16510 %}
16511 
16512 instruct vand16B(vecX dst, vecX src1, vecX src2)
16513 %{
16514   predicate(n->as_Vector()->length_in_bytes() == 16);
16515   match(Set dst (AndV src1 src2));
16516   ins_cost(INSN_COST);
16517   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16518   ins_encode %{
16519     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16520             as_FloatRegister($src1$$reg),
16521             as_FloatRegister($src2$$reg));
16522   %}
16523   ins_pipe(vlogical128);
16524 %}
16525 
16526 // --------------------------------- OR ---------------------------------------
16527 
// Vector bitwise OR, element-type agnostic: matched by total byte length.
// The 8B form also covers 4-byte vectors (upper half of the D register is
// don't-care in that case).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Format mnemonic corrected from "and" to "orr": this rule emits an orr
  // instruction (cf. vor16B), so the debug/PrintAssembly text must say orr.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16542 
// Vector bitwise OR, full 128-bit vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16556 
16557 // --------------------------------- XOR --------------------------------------
16558 
16559 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16560 %{
16561   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16562             n->as_Vector()->length_in_bytes() == 8);
16563   match(Set dst (XorV src1 src2));
16564   ins_cost(INSN_COST);
16565   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16566   ins_encode %{
16567     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16568             as_FloatRegister($src1$$reg),
16569             as_FloatRegister($src2$$reg));
16570   %}
16571   ins_pipe(vlogical64);
16572 %}
16573 
16574 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16575 %{
16576   predicate(n->as_Vector()->length_in_bytes() == 16);
16577   match(Set dst (XorV src1 src2));
16578   ins_cost(INSN_COST);
16579   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16580   ins_encode %{
16581     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16582             as_FloatRegister($src1$$reg),
16583             as_FloatRegister($src2$$reg));
16584   %}
16585   ins_pipe(vlogical128);
16586 %}
16587 
// ------------------------------ Shift ---------------------------------------
// Materialize a vector shift count by replicating a scalar count into every
// byte lane (the variable sshl/ushl instructions take per-lane counts).
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// 128-bit variant of vshiftcnt8B.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16610 
// Byte vector left shift by a vector of counts (sshl with positive counts).
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsll8B.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16637 
16638 // Right shifts with vector shift count on aarch64 SIMD are implemented
16639 // as left shift by negative shift count.
16640 // There are two cases for vector shift count.
16641 //
16642 // Case 1: The vector shift count is from replication.
16643 //        |            |
16644 //    LoadVector  RShiftCntV
16645 //        |       /
16646 //     RShiftVI
// Note: in the inner loop, multiple neg instructions are used; they can be
// moved to the outer loop and merged into a single neg instruction.
16649 //
16650 // Case 2: The vector shift count is from loading.
16651 // This case isn't supported by middle-end now. But it's supported by
16652 // panama/vectorIntrinsics(JEP 338: Vector API).
16653 //        |            |
16654 //    LoadVector  LoadVector
16655 //        |       /
16656 //     RShiftVI
16657 //
16658 
// Byte vector arithmetic right shift by a vector of counts: negate the
// counts into tmp, then sshl (left shift by negative count == right shift).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsra8B.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Byte vector logical right shift: negate counts, then ushl.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsrl8B.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16728 
// Byte vector left shift by immediate. A shift >= the element width (8)
// would be an invalid shl encoding; Java semantics make the result 0, so
// emit eor(dst, src, src) to zero the destination instead.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of vsll8B_imm.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Byte vector arithmetic right shift by immediate. Shifts >= 8 are clamped
// to 7: an arithmetic shift by (width-1) yields all sign bits, matching
// Java semantics for oversized shifts.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of vsra8B_imm.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) sh = 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Byte vector logical right shift by immediate; shifts >= 8 zero the
// destination via eor(dst, src, src), as in vsll8B_imm.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of vsrl8B_imm.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16835 
// Short vector left shift by a vector of counts.
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsll4S.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Short vector arithmetic right shift: negate counts then sshl.
// The negr uses a byte arrangement (T8B) — negating every byte of the
// replicated count register gives the same per-lane negated counts.
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsra4S.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Short vector logical right shift: negate counts then ushl.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsrl4S.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
16932 
// Short vector left shift by immediate; shifts >= element width (16) zero
// the destination via eor(dst, src, src), matching Java semantics.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of vsll4S_imm.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Short vector arithmetic right shift by immediate; shifts >= 16 are
// clamped to 15 (all sign bits), matching Java semantics.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of vsra4S_imm.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Short vector logical right shift by immediate; shifts >= 16 zero the
// destination.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 128-bit variant of vsrl4S_imm.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17039 
// Int vector left shift by a vector of counts.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsll2I.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Int vector arithmetic right shift: negate counts (byte arrangement
// covers the whole register) then sshl.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsra2I.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}

// Int vector logical right shift: negate counts then ushl.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}

// 128-bit variant of vsrl2I.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17133 
17134 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
17135   predicate(n->as_Vector()->length() == 2);
17136   match(Set dst (LShiftVI src shift));
17137   ins_cost(INSN_COST);
17138   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
17139   ins_encode %{
17140     __ shl(as_FloatRegister($dst$$reg), __ T2S,
17141            as_FloatRegister($src$$reg),
17142            (int)$shift$$constant);
17143   %}
17144   ins_pipe(vshift64_imm);
17145 %}
17146 
17147 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
17148   predicate(n->as_Vector()->length() == 4);
17149   match(Set dst (LShiftVI src shift));
17150   ins_cost(INSN_COST);
17151   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
17152   ins_encode %{
17153     __ shl(as_FloatRegister($dst$$reg), __ T4S,
17154            as_FloatRegister($src$$reg),
17155            (int)$shift$$constant);
17156   %}
17157   ins_pipe(vshift128_imm);
17158 %}
17159 
17160 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
17161   predicate(n->as_Vector()->length() == 2);
17162   match(Set dst (RShiftVI src shift));
17163   ins_cost(INSN_COST);
17164   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
17165   ins_encode %{
17166     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
17167             as_FloatRegister($src$$reg),
17168             (int)$shift$$constant);
17169   %}
17170   ins_pipe(vshift64_imm);
17171 %}
17172 
17173 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
17174   predicate(n->as_Vector()->length() == 4);
17175   match(Set dst (RShiftVI src shift));
17176   ins_cost(INSN_COST);
17177   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
17178   ins_encode %{
17179     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
17180             as_FloatRegister($src$$reg),
17181             (int)$shift$$constant);
17182   %}
17183   ins_pipe(vshift128_imm);
17184 %}
17185 
// Vector logical (unsigned) shift-right by immediate: two 32-bit ints in a
// 64-bit (D) register. Matches URShiftVI with a constant count; emits
// USHR Vd.2S, Vn.2S, #imm.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane int vectors
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
17198 
// Vector logical (unsigned) shift-right by immediate: four 32-bit ints in a
// 128-bit (Q) register. Matches URShiftVI with a constant count; emits
// USHR Vd.4S, Vn.4S, #imm.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);  // only 4-lane int vectors
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17211 
// Vector shift-left by a per-lane register count: two 64-bit longs in a
// 128-bit (Q) register. SSHL with a positive count is a left shift, so a
// single instruction suffices (no negate needed, unlike the right-shift forms
// below).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane long vectors
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
17224 
// Vector arithmetic (signed) shift-right by a per-lane register count: two
// 64-bit longs. AArch64 has no variable right-shift instruction, so this is
// synthesized as negate-the-counts followed by SSHL (shift-left by a negative
// amount is a right shift). The NEG is done on byte lanes (T16B): SSHL reads
// only the low byte of each lane's count (per the Arm ARM), and byte-wise
// negation yields the same low byte as negating the full 64-bit value.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane long vectors
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);  // scratch register to hold the negated shift counts
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17241 
// Vector logical (unsigned) shift-right by a per-lane register count: two
// 64-bit longs. Same negate-then-shift-left synthesis as vsra2L above, but
// using USHL for a zero-extending shift. The byte-wise NEG (T16B) is valid
// because USHL consults only the low byte of each lane's count (per the
// Arm ARM).
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane long vectors
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);  // scratch register to hold the negated shift counts
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
17258 
// Vector shift-left by immediate: two 64-bit longs in a 128-bit (Q) register.
// Matches LShiftVL with a constant count; emits SHL Vd.2D, Vn.2D, #imm.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane long vectors
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17271 
// Vector arithmetic (signed) shift-right by immediate: two 64-bit longs in a
// 128-bit (Q) register. Matches RShiftVL with a constant count; emits
// SSHR Vd.2D, Vn.2D, #imm.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane long vectors
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17284 
// Vector logical (unsigned) shift-right by immediate: two 64-bit longs in a
// 128-bit (Q) register. Matches URShiftVL with a constant count; emits
// USHR Vd.2D, Vn.2D, #imm.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);  // only 2-lane long vectors
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
17297 
17298 //----------PEEPHOLE RULES-----------------------------------------------------
17299 // These must follow all instruction definitions as they use the names
17300 // defined in the instructions definitions.
17301 //
17302 // peepmatch ( root_instr_name [preceding_instruction]* );
17303 //
17304 // peepconstraint %{
17305 // (instruction_number.operand_name relational_op instruction_number.operand_name
17306 //  [, ...] );
17307 // // instruction numbers are zero-based using left to right order in peepmatch
17308 //
17309 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17310 // // provide an instruction_number.operand_name for each operand that appears
17311 // // in the replacement instruction's match rule
17312 //
17313 // ---------VM FLAGS---------------------------------------------------------
17314 //
17315 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17316 //
17317 // Each peephole rule is given an identifying number starting with zero and
17318 // increasing by one in the order seen by the parser.  An individual peephole
17319 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17320 // on the command-line.
17321 //
17322 // ---------CURRENT LIMITATIONS----------------------------------------------
17323 //
17324 // Only match adjacent instructions in same basic block
17325 // Only equality constraints
17326 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17327 // Only one replacement instruction
17328 //
17329 // ---------EXAMPLE----------------------------------------------------------
17330 //
17331 // // pertinent parts of existing instructions in architecture description
17332 // instruct movI(iRegINoSp dst, iRegI src)
17333 // %{
17334 //   match(Set dst (CopyI src));
17335 // %}
17336 //
17337 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17338 // %{
17339 //   match(Set dst (AddI dst src));
17340 //   effect(KILL cr);
17341 // %}
17342 //
17343 // // Change (inc mov) to lea
17344 // peephole %{
//   // increment preceded by register-register move
17346 //   peepmatch ( incI_iReg movI );
17347 //   // require that the destination register of the increment
17348 //   // match the destination register of the move
17349 //   peepconstraint ( 0.dst == 1.dst );
17350 //   // construct a replacement instruction that sets
17351 //   // the destination to ( move's source register + one )
17352 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17353 // %}
17354 //
17355 
17356 // Implementation no longer uses movX instructions since
17357 // machine-independent system no longer uses CopyX nodes.
17358 //
17359 // peephole
17360 // %{
17361 //   peepmatch (incI_iReg movI);
17362 //   peepconstraint (0.dst == 1.dst);
17363 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17364 // %}
17365 
17366 // peephole
17367 // %{
17368 //   peepmatch (decI_iReg movI);
17369 //   peepconstraint (0.dst == 1.dst);
17370 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17371 // %}
17372 
17373 // peephole
17374 // %{
17375 //   peepmatch (addI_iReg_imm movI);
17376 //   peepconstraint (0.dst == 1.dst);
17377 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17378 // %}
17379 
17380 // peephole
17381 // %{
17382 //   peepmatch (incL_iReg movL);
17383 //   peepconstraint (0.dst == 1.dst);
17384 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17385 // %}
17386 
17387 // peephole
17388 // %{
17389 //   peepmatch (decL_iReg movL);
17390 //   peepconstraint (0.dst == 1.dst);
17391 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17392 // %}
17393 
17394 // peephole
17395 // %{
17396 //   peepmatch (addL_iReg_imm movL);
17397 //   peepconstraint (0.dst == 1.dst);
17398 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17399 // %}
17400 
17401 // peephole
17402 // %{
17403 //   peepmatch (addP_iReg_imm movP);
17404 //   peepconstraint (0.dst == 1.dst);
17405 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17406 // %}
17407 
17408 // // Change load of spilled value to only a spill
17409 // instruct storeI(memory mem, iRegI src)
17410 // %{
17411 //   match(Set mem (StoreI mem src));
17412 // %}
17413 //
17414 // instruct loadI(iRegINoSp dst, memory mem)
17415 // %{
17416 //   match(Set dst (LoadI mem));
17417 // %}
17418 //
17419 
17420 //----------SMARTSPILL RULES---------------------------------------------------
17421 // These must follow all instruction definitions as they use the names
17422 // defined in the instructions definitions.
17423 
17424 // Local Variables:
17425 // mode: c++
17426 // End: